diff --git a/.gitignore b/.gitignore index 2a1aff013..f3c069265 100644 --- a/.gitignore +++ b/.gitignore @@ -6,14 +6,17 @@ settings/keys/* *.dot reports ENV -venv +.env .DS_Store build deploy/last-update logs/* -cache/* celerybeat.pid celerybeat-schedule .gitignore~ -static/scss/**/*.css.map -*.retry \ No newline at end of file +assets/* +*.ipynb +dump.rdb +Pipfile.lock + +*.css.map \ No newline at end of file diff --git a/.python-version b/.python-version new file mode 100644 index 000000000..a9f8d1be3 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.9.11 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 96d4df1d0..000000000 --- a/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: python - -python: - - '2.7' - -services: - - redis-server - - mysql - -env: - global: - - DJANGO_SETTINGS_MODULE=regluit.settings.travis - - PYTHONPATH=/home/travis/build/EbookFoundation/ - -before_install: - - sudo mkdir /var/log/django - - sudo chmod 777 /var/log/django - - mkdir ~/build/EbookFoundation/regluit/settings/keys/ - - cp ~/build/EbookFoundation/regluit/settings/dummy/__init__.py ~/build/EbookFoundation/regluit/settings/keys/__init__.py - - openssl aes-256-cbc -K $encrypted_56eb2b7cc527_key -iv $encrypted_56eb2b7cc527_iv -in ~/build/EbookFoundation/regluit/test/travis-host.py.enc -out ~/build/EbookFoundation/regluit/settings/keys/host.py -d - -install: - - pip install -r requirements_versioned.pip - -script: django-admin test diff --git a/Pipfile b/Pipfile new file mode 100644 index 000000000..93e44e5d2 --- /dev/null +++ b/Pipfile @@ -0,0 +1,43 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +celery = "==4.4.7" +django = "==1.11.29" +django-contrib-comments = "==2.0.0" +django-extensions = "==3.1.1" +django-registration = "==2.4.1" +social-auth-app-django = "==2.1.0" +django-tastypie = "==0.14.1" +django-el-pagination = "==3.2.4" +django-selectable = "==1.1.0" +django-notification = {editable = true, 
ref = "1ad2be4adf3551a3471d923380368341452e178a", git = "git+https://github.com/eshellman/django-notification.git"} +django-email-change = {editable = true, ref = "fb063296cbf4e4a6d8a93d34d98fe0c7739c2e0d", git = "git+https://github.com/eshellman/django-email-change.git"} +django-ckeditor = "==5.6.1" +django-storages = "==1.5.2" +sorl-thumbnail = "==12.6.3" +django-mptt = "==0.8.6" +pyepub = "==0.5.0" +django-sass-processor = "==0.8.2" +mysqlclient = "==1.4.6" +mailchimp3 = "==3.0.14" +boto3 = "==1.17.91" +pymarc = "==4.2.1" +beautifulsoup4 = "==4.11.1" +gitberg = "==0.8.7" +risparser = "==0.4.3" +pyoai = "==2.5.0" +django-jsonfield = "==1.0.0" +mechanize = "==0.4.5" +stripe = "==2.76.0" +selenium = "==3.141.0" +requests-mock = "==1.8.0" +redis = "==3.5.3" +xhtml2pdf = ">=0.2.15" +pillow = "==9.5.0" +pypdf = ">=5.0.0" + +[requires] +python_version = "3.9" diff --git a/README.md b/README.md index 66fc4a291..73a2dca03 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ regluit - "The Unglue.it web application and website" ======= -This repo - https://github.com/EbookFoundation/regluit will be the place for collaborative development for Unglue.it. Add issues and submit pull requests here. As of January 19, 2017, https://github.com/Gluejar/regluit is still being used for production builds. +Another repo - https://github.com/EbookFoundation/regluit will eventually be the place for collaborative development for Unglue.it. Add issues and submit pull requests there. As of September 1, 2019, https://github.com/Gluejar/regluit is still being used for production builds. The first version of the unglue.it codebase was a services-oriented project named "unglu". We decided that "unglu" was too complicated, so we started over and named the new project "regluit". @@ -10,49 +10,24 @@ contains four main applications: `core`, `frontend`, `api` and `payment` that ca and configured on as many ec2 instances that are needed to support traffic. 
The partitioning between these modules is not as clean as would be ideal. `payment` is particularly messy because we had to retool it twice because we had to switch from Paypal to Amazon Payments to Stripe. -regluit was originally developed on Django 1.3 (python 2.7) and currently runs on Django 1.8. +regluit was originally developed on Django 1.3 (python 2.7) and currently runs on Django 1.11 Python 3.8). -Development (Vagrant + Virtualbox) -------- - -The recommended method for local development is to create a virtual machine with [Vagrant](https://www.vagrantup.com/) and [Virtualbox](https://www.virtualbox.org/wiki/Downloads). -With this method, the only requirements on the host machine are `virtualbox` and `vagrant`. -Vagrant will use the `ansible-local` provisioner, therefore installing python and ansible on the host machine is not necessary. - -__Instructions for Ubuntu 16:__ -1. Install virtualbox: `sudo apt-get install virtualbox` -2. Install vagrant: `sudo apt-get install vagrant` -3. Clone the `EbookFoundation/regluit` repository. -4. Navigate to the base directory of the cloned repo (where `Vagrantfile` is located). -5. Run `vagrant up` to create the VM, install dependencies, and start necessary services. - * Note: This step may take up to 15 minutes to complete. -6. Once the VM has been created, run `vagrant ssh` to log in to the virtual machine you just created. If provisioning was successful, you should see a success message upon login. - * If virtualenv doesn't activate upon login, you can do it manually by running `cd /opt/regluit && source venv/bin/activate` -7. Within the VM, run `./manage.py runserver 0.0.0.0:8000` to start the Django development server. -8. On your host machine, open your web browser of choice and navigate to `http://127.0.0.1:8000` - -__Instructions for other platforms (Windows/OSX):__ -* Steps are essentially the same, except for the installation of Vagrant and Virtualbox. 
Refer to each package's documentation for specific installation instructions. - -_NOTE:_ If running Windows on your host machine, ensure you are running `vagrant up` from an elevated command prompt, e.g. right click on Command Prompt -> Run As Administrator. - - -Development (Host Machine) +Develop ------- Here are some instructions for setting up regluit for development on -an Ubuntu system. If you are on OS X see notes below -to install python-setuptools in step 1: +an Ubuntu system. If you are on OS X see notes below. -1. Ensure MySQL and Redis are installed & running on your system. + +- Ensure MySQL 5.7 and Redis are installed & running on your system. 1. Create a MySQL database and user for unglueit. 1. `sudo apt-get upgrade gcc` -1. `sudo apt-get install python-setuptools git python-lxml build-essential libssl-dev libffi-dev python2.7-dev libxml2-dev libxslt-dev libmysqlclient-dev` +1. `sudo apt-get install python-setuptools git python-lxml build-essential libssl-dev libffi-dev python3.8-dev libxml2-dev libxslt-dev libmysqlclient-dev` 1. `sudo easy_install virtualenv virtualenvwrapper` 1. `git clone git@github.com:Gluejar/regluit.git` 1. `cd regluit` 1. `mkvirtualenv regluit` -1. `pip install -r requirements_versioned.pip` +1. `pip install -r requirements.txt` 1. `add2virtualenv ..` 1. `cp settings/dev.py settings/me.py` 1. `mkdir settings/keys/` @@ -63,8 +38,9 @@ to install python-setuptools in step 1: 1. `deactivate ; workon regluit` 1. `django-admin.py migrate --noinput` 1. `django-admin.py loaddata core/fixtures/initial_data.json core/fixtures/bookloader.json` populate database with test data to run properly. -1. `django-admin.py celeryd --loglevel=INFO` start the celery daemon to perform asynchronous tasks like adding related editions, and display logging information in the foreground. -1. `django-admin.py celerybeat -l INFO` to start the celerybeat daemon to handle scheduled tasks. +1. `redis-server` to start the task broker +1. 
`celery -A regluit worker --loglevel=INFO ` start the celery daemon to perform asynchronous tasks like adding related editions, and display logging information in the foreground. Add ` --logfile=logs/celery.log` if you want the logs to go into a log file. +1. `celery -A regluit beat --loglevel=INFO` to start the celerybeat daemon to handle scheduled tasks. 1. `django-admin.py runserver 0.0.0.0:8000` (you can change the port number from the default value of 8000) 1. make sure a [redis server](https://redis.io/topics/quickstart) is running 1. Point your browser to http://localhost:8000/ @@ -77,105 +53,33 @@ CSS development Production Deployment --------------------- -OBSOLETE -Below are the steps for getting regluit running on EC2 with Apache and mod_wsgi, and talking to an Amazon Relational Data Store instance. -Instructions for setting please are slightly different. - -1. create an ubuntu ec2 instance (e.g, go http://alestic.com/ to find various ubuntu images) -1. `sudo aptitude update` -1. `sudo aptitude upgrade` -1. `sudo aptitude install git-core apache libapache2-mod-wsgi mysql-client python-virtualenv python-mysqldb redis-server python-lxml postfix python-dev libmysqlclient-dev` -1. `sudo mkdir /opt/regluit` -1. `sudo chown ubuntu:ubuntu /opt/regluit` -1. `cd /opt` -1. `git config --global user.name "Raymond Yee"` -1. `git config --global user.email "rdhyee@gluejar.com"` -1. `ssh-keygen` -1. add `~/.ssh/id\_rsa.pub` as a deploy key on github https://github.com/Gluejar/regluit/admin/keys -1. `git clone git@github.com:Gluejar/regluit.git` -1. `cd /opt/regluit` -1. create an Amazon RDS instance -1. connect to it, e.g. `mysql -u root -h gluejardb.cboagmr25pjs.us-east-1.rds.amazonaws.com -p` -1. `CREATE DATABASE unglueit CHARSET utf8;` -1. `GRANT ALL ON unglueit.\* TO ‘unglueit’@’ip-10-244-250-168.ec2.internal’ IDENTIFIED BY 'unglueit' REQUIRE SSL;` -1. update settings/prod.py with database credentials -1. `virtualenv ENV` -1. `source ENV/bin/activate` -1. 
`pip install -r requirements_versioned.pip` -1. `echo "/opt/" > ENV/lib/python2.7/site-packages/regluit.pth` -1. `django-admin.py syncdb --migrate --settings regluit.settings.prod` -1. `sudo mkdir /var/www/static` -1. `sudo chown ubuntu:ubuntu /var/www/static` -1. `django-admin.py collectstatic --settings regluit.settings.prod` -1. `sudo ln -s /opt/regluit/deploy/regluit.conf /etc/apache2/sites-available/regluit` -1. `sudo a2ensite regluit` -1. `sudo a2enmod ssl rewrite` -1. `cd /home/ubuntu` -1. copy SSL server key to `/etc/ssl/private/server.key` -1. copy SSL certificate to `/etc/ssl/certs/server.crt` -1. `sudo /etc/init.d/apache2 restart` -1. `sudo adduser --no-create-home celery --disabled-password --disabled-login` (just enter return for all?) -1. `sudo cp deploy/celeryd /etc/init.d/celeryd` -1. `sudo chmod 755 /etc/init.d/celeryd` -1. `sudo cp deploy/celeryd.conf /etc/default/celeryd` -1. `sudo mkdir /var/log/celery` -1. `sudo mkdir /var/run/celery` -1. `sudo chown celery:celery /var/log/celery /var/run/celery` -1. `sudo /etc/init.d/celeryd start` -1. `sudo cp deploy/celerybeat /etc/init.d/celerybeat` -1. `sudo chmod 755 /etc/init.d/celerybeat` -1. `sudo cp deploy/celerybeat.conf /etc/default/celerybeat` -1. `sudo mkdir /var/log/celerybeat` -1. `sudo chown celery:celery /var/log/celerybeat` -1. `sudo /etc/init.d/celerybeat start` - -## setup to enable ckeditor to work properly - -1. `mkdir /var/www/static/media/` -1. `sudo chown ubuntu:www-data /var/www/static/media/` - - -Updating Production --------------------- - -1. Study the latest changes in the master branch, especially keep in mind how -it has [changed from what's in production](https://github.com/Gluejar/regluit/compare/production...master). -1. Update the production branch accordingly. If everything in `master` is ready to be moved into `production`, you can just merge `master` into `production`. Otherwise, you can grab specific parts. 
(How to do so is something that should probably be described in greater detail.) -1. Login to unglue.it and run [`/opt/regluit/deploy/update-prod`](https://github.com/Gluejar/regluit/blob/master/deploy/update-prod) - +See http://github.com/EbookFoundation/regluit-provisioning OS X Developer Notes ------------------- To run regluit on OS X you should have XCode installed -Install virtualenvwrapper according -to the process at http://blog.praveengollakota.com/47430655: +Install MySQL: + `brew install mysql@5.7` + `mysql_secure_installation` + `mysqld_safe --user=root -p` + -1. `sudo easy\_install pip` -1. `sudo pip install virtualenv` -1. `pip install virtualenvwrapper` +We use pyenv and pipenv to set up an environment. Edit or create .bashrc in ~ to enable virtualenvwrapper commands: -1. `mkdir ~/.virtualenvs` -1. Edit .bashrc to include the following lines: - - export WORKON_HOME=$HOME/.virtualenvs - source your_path_to_virtualenvwrapper.sh_here -In the above web site, the path to virtualenvwrapper.sh was -/Library/Frameworks/Python.framework/Versions/2.7/bin/virtualenvwrapper.sh -In Snow Leopard, this may be /usr/local/bin/virtualenvwrapper.sh +1. `pipenv install -r requirements.txt` +1. Edit .zshrc to include the following lines: -Configure Terminal to automatically notice this at startup: -Terminal –> Preferences –> Settings –> Shell -Click "run command"; add `source ~/.bashrc` + `eval "$(pyenv init -)"` + `export PATH=$PATH:/Applications/Postgres.app/Contents/Versions/10/bin` + `export PATH=$PATH:/usr/local/opt/mysql-client/bin:$PATH` + `export ANSIBLE_VAULT_PASSWORD_FILE=PATH_TO_VAULT_PASSWORD` -If you get 'EnvironmentError: mysql_config not found' -edit the line ~/.virtualenvs/regluit/build/MySQL-python/setup_posix.py -1. mysql_config.path = "mysql_config" -to be (using a path that exists on your system) -1. 
mysql_config.path = "/usr/local/mysql-5.5.20-osx10.6-x86_64/bin/mysql_config" +If you get `EnvironmentError: mysql_config not found` +you might need to set a path to mysqlconfig You may need to set utf8 in /etc/my.cnf collation-server = utf8_unicode_ci @@ -183,15 +87,6 @@ collation-server = utf8_unicode_ci init-connect='SET NAMES utf8' character-set-server = utf8 -Selenium Install ---------------- - -Download the selenium server: -http://selenium.googlecode.com/files/selenium-server-standalone-2.5.0.jar - -Start the selenium server: -'java -jar selenium-server-standalone-2.5.0.jar' - MARC Records ------------ @@ -232,7 +127,12 @@ MARC Records * if you have records with both DIRECT and UNGLUE links, you'll need two MARCRecord instances * if you have both kinds of link, put them in _separate_ records, as marc_format can only take one value +MySQL Migration +--------------- + +## 5.7 - 8.0 Notes -# vagrant / ansible +* Many migration blockers were removed by by dumping, then restoring the database. +* After that, RDS was able to migrate +* needed to create the unglueit user from the mysql client -[How to build machines using Vagrant/ansible](docs/vagrant_ansible.md) diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 0ad9cdbc6..000000000 --- a/Vagrantfile +++ /dev/null @@ -1,56 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# All Vagrant configuration is done below. The "2" in Vagrant.configure -# configures the configuration version (we support older styles for -# backwards compatibility). Please don't change it unless you know what -# you're doing. -Vagrant.configure("2") do |config| - # The most common configuration options are documented and commented below. - # For a complete reference, please see the online documentation at - # https://docs.vagrantup.com. - # Every Vagrant development environment requires a box. You can search for - # boxes at https://vagrantcloud.com/search. 
- config.vm.box = "ubuntu/xenial64" - - # Disable automatic box update checking. If you disable this, then - # boxes will only be checked for updates when the user runs - # `vagrant box outdated`. This is not recommended. - config.vm.box_check_update = false - - # Setup specific for local machine - config.vm.define "regluit-local", primary: true do |local| - # Create a private network - local.vm.network "private_network", type: "dhcp" - local.vm.hostname = "regluit-local" - - # VirtuaLBox provider settings for running locally with Oracle VirtualBox - # --uartmode1 disconnected is necessary to disable serial interface, which - # is known to cause issues with Ubuntu 16 VM's - local.vm.provider "virtualbox" do |vb| - vb.name = "regluit-local" - vb.memory = 1024 - vb.cpus = 2 - vb.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ] - end - - end - - config.vm.synced_folder ".", "/vagrant", disabled: true - config.vm.synced_folder ".", "/opt/regluit" - - config.vm.network "forwarded_port", guest: 8000, host: 8000 - - # Provision node with Ansible running on the Vagrant host - # This requires you have Ansible installed locally - # Vagrant autogenerates an ansible inventory file to use - config.vm.provision "ansible_local" do |ansible| - ansible.playbook = "/opt/regluit/provisioning/setup-regluit.yml" - ansible.provisioning_path = "/opt/regluit" - ansible.verbose = true - ansible.install = true - end - - config.vm.post_up_message = "Successfully created regluit-local VM. Run 'vagrant ssh' to log in and start the development server." - -end diff --git a/__init__.py b/__init__.py index e69de29bb..09eec0846 100755 --- a/__init__.py +++ b/__init__.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, unicode_literals + +# This will make sure the app is always imported when +# Django starts so that shared_task will use this app. 
+from .celery_module import app as celery_app + +__all__ = ('celery_app',) + diff --git a/api/onix.py b/api/onix.py index c2c3eed49..afa7308e0 100644 --- a/api/onix.py +++ b/api/onix.py @@ -1,81 +1,125 @@ import datetime -import pytz import re -from lxml import etree + +from bs4 import BeautifulSoup +import pytz + +from django.core.paginator import Paginator, InvalidPage + +from regluit.bisac import Bisac from regluit.core import models from regluit.core.cc import ccinfo -from regluit.bisac import Bisac from .crosswalks import relator_contrib, iso639 -feed_xml = """ - + +WORKS_PER_PAGE = 30 + +feed_header = """ + """ +feed_xml = feed_header + ''' +''' +soup = None bisac = Bisac() -def text_node(tag, text, attrib={}): - node = etree.Element(tag, attrib=attrib) - node.text = text +def text_node(tag, text, attrib=None): + node = soup.new_tag(tag) + if attrib: + node.attrs = attrib + node.string = text return node -def onix_feed(facet, max=None): - feed = etree.fromstring(feed_xml) - feed.append(header(facet)) +def sub_element(node, tag, attrib=None): + sub = soup.new_tag(tag) + if attrib: + sub.attrs = attrib + node.append(sub) + return sub + + +def onix_feed(facet, max=None, page_number=None): + global soup + if not soup: + soup = BeautifulSoup('', 'lxml') + + yield feed_header + str(header(facet)) works = facet.works[0:max] if max else facet.works + + if page_number is not None: + try: + p = Paginator(works, WORKS_PER_PAGE) + works = p.page(page_number) + except InvalidPage: + works = models.Work.objects.none() + for work in works: - editions = models.Edition.objects.filter(work=work,ebooks__isnull=False) - editions = facet.facet_object.filter_model("Edition",editions).distinct() + editions = models.Edition.objects.filter(work=work, ebooks__isnull=False) + editions = facet.facet_object.filter_model("Edition", editions).distinct() for edition in editions: edition_prod = product(edition, facet.facet_object) if edition_prod is not None: - feed.append(edition_prod) - 
return etree.tostring(feed, pretty_print=True) - + yield edition_prod + yield '' + def onix_feed_for_work(work): - feed = etree.fromstring(feed_xml) - feed.append(header(work)) - for edition in models.Edition.objects.filter(work=work,ebooks__isnull=False).distinct(): + global soup + if not soup: + soup = BeautifulSoup('', 'lxml') + + feed = BeautifulSoup(feed_xml, 'xml') + feed.ONIXMessage.append(header(work)) + for edition in models.Edition.objects.filter(work=work, ebooks__isnull=False).distinct(): edition_prod = product(edition) if edition_prod is not None: - feed.append(product(edition)) - return etree.tostring(feed, pretty_print=True) - + feed.ONIXMessage.append(product(edition)) + return str(feed) + def header(facet=None): - header_node = etree.Element("Header") - sender_node = etree.Element("Sender") + header_node = soup.new_tag("Header") + sender_node = soup.new_tag("Sender") sender_node.append(text_node("SenderName", "unglue.it")) sender_node.append(text_node("EmailAddress", "unglueit@ebookfoundation.org")) header_node.append(sender_node) - header_node.append(text_node("SentDateTime", pytz.utc.localize(datetime.datetime.utcnow()).strftime('%Y%m%dT%H%M%SZ'))) + header_node.append(text_node( + "SentDateTime", + pytz.utc.localize(datetime.datetime.utcnow()).strftime('%Y%m%dT%H%M%SZ') + )) header_node.append(text_node("MessageNote", facet.title if facet else "Unglue.it Editions")) return header_node def product(edition, facet=None): - ebooks=facet.filter_model("Ebook",edition.ebooks.filter(active=True)) if facet else edition.ebooks.filter(active=True) - ebooks=ebooks.order_by('-created') - # Just because an edition satisfies 2 facets with multiple ebooks doesn't mean that there is a single ebook satisfies both facets + ebooks = facet.filter_model( + "Ebook", + edition.ebooks.filter(active=True) + ) if facet else edition.ebooks.filter(active=True) + ebooks = ebooks.order_by('-created') + # Just because an edition satisfies 2 facets with multiple ebooks doesn't 
mean that there + # is a single ebook satisfies both facets if not ebooks.exists(): return None - - work=edition.work - product_node = etree.Element("Product") - product_node.append(text_node("RecordReference", "it.unglue.work.%s.%s" % (work.id, edition.id))) - product_node.append(text_node("NotificationType", "03" )) # final - - ident_node = etree.SubElement(product_node, "ProductIdentifier") - ident_node.append(text_node("ProductIDType", "01" )) #proprietary - ident_node.append(text_node("IDTypeName", "unglue.it edition id" )) #proprietary - ident_node.append(text_node("IDValue", unicode(edition.id) )) - + + work = edition.work + product_node = soup.new_tag("Product") + product_node.append(text_node( + "RecordReference", "it.unglue.work.%s.%s" % (work.id, edition.id) + )) + product_node.append(text_node("NotificationType", "03")) # final + + ident_node = sub_element(product_node, "ProductIdentifier") + ident_node.append(text_node("ProductIDType", "01")) #proprietary + ident_node.append(text_node("IDTypeName", "unglue.it edition id")) #proprietary + ident_node.append(text_node("IDValue", str(edition.id))) + # wrong isbn better than no isbn isbn = edition.isbn_13 if edition.isbn_13 else edition.work.first_isbn_13() if isbn: - ident_node = etree.SubElement(product_node, "ProductIdentifier") - ident_node.append(text_node("ProductIDType", "03" )) #proprietary - ident_node.append(text_node("IDValue", isbn )) + ident_node = sub_element(product_node, "ProductIdentifier") + ident_node.append(text_node("ProductIDType", "03")) #proprietary + ident_node.append(text_node("IDValue", isbn)) # Descriptive Detail Block - descriptive_node = etree.SubElement(product_node, "DescriptiveDetail") - descriptive_node.append(text_node("ProductComposition", "00" )) # single item - descriptive_node.append(text_node("ProductForm", "ED" )) # download + descriptive_node = sub_element(product_node, "DescriptiveDetail") + descriptive_node.append(text_node("ProductComposition", "00")) # single 
item + descriptive_node.append(text_node("ProductForm", "ED")) # download ebook = None latest_ebooks = [] @@ -84,129 +128,131 @@ def product(edition, facet=None): if ebook.format not in ebook_formats: ebook_formats.append(ebook.format) latest_ebooks.append(ebook) - if ebook.format=='epub': - descriptive_node.append(text_node("ProductFormDetail", "E101" )) - elif ebook.format=='pdf': - descriptive_node.append(text_node("ProductFormDetail", "E107" )) - elif ebook.format=='mobi': - descriptive_node.append(text_node("ProductFormDetail", "E116" )) + if ebook.format == 'epub': + descriptive_node.append(text_node("ProductFormDetail", "E101")) + elif ebook.format == 'pdf': + descriptive_node.append(text_node("ProductFormDetail", "E107")) + elif ebook.format == 'mobi': + descriptive_node.append(text_node("ProductFormDetail", "E116")) if ebook.rights: - license_node = etree.SubElement(descriptive_node, "EpubLicense") - license_node.append(text_node("EpubLicenseName", ebook.rights )) - lic_expr_node = etree.SubElement(license_node, "EpubLicenseExpression") - lic_expr_node.append(text_node("EpubLicenseExpressionType", '01' )) #human readable - lic_expr_node.append(text_node("EpubLicenseExpressionLink", ccinfo(ebook.rights).url )) - - title_node = etree.SubElement(descriptive_node, "TitleDetail") - title_node.append(text_node("TitleType", '01' )) #distinctive title - title_el = etree.SubElement(title_node, "TitleElement") - title_el.append(text_node("TitleElementLevel", '01' )) - title_el.append(text_node("TitleText", edition.title )) + license_node = sub_element(descriptive_node, "EpubLicense") + license_node.append(text_node("EpubLicenseName", ebook.rights)) + lic_expr_node = sub_element(license_node, "EpubLicenseExpression") + lic_expr_node.append(text_node("EpubLicenseExpressionType", '01')) #human readable + lic_expr_node.append(text_node("EpubLicenseExpressionLink", ccinfo(ebook.rights).url)) + + title_node = sub_element(descriptive_node, "TitleDetail") + 
title_node.append(text_node("TitleType", '01')) #distinctive title + title_el = sub_element(title_node, "TitleElement") + title_el.append(text_node("TitleElementLevel", '01')) + title_el.append(text_node("TitleText", edition.title)) contrib_i = 0 for contrib in edition.relators.all(): - contrib_i+=1 - contrib_node = etree.SubElement(descriptive_node, "Contributor") - contrib_node.append(text_node("SequenceNumber", unicode(contrib_i ))) - contrib_node.append(text_node("ContributorRole", relator_contrib.get(contrib.relation.code,"") )) + contrib_i += 1 + contrib_node = sub_element(descriptive_node, "Contributor") + contrib_node.append(text_node("SequenceNumber", str(contrib_i))) + contrib_node.append(text_node("ContributorRole", + relator_contrib.get(contrib.relation.code, ""))) contrib_node.append(text_node("PersonName", contrib.author.name)) contrib_node.append(text_node("PersonNameInverted", contrib.author.last_name_first)) (lang, locale) = (edition.work.language, None) if '_' in lang: (lang, locale) = lang.split('_') - if len(lang)==2: + if len(lang) == 2: lang = iso639.get(lang, None) if lang: - lang_node = etree.SubElement(descriptive_node, "Language") + lang_node = sub_element(descriptive_node, "Language") lang_node.append(text_node("LanguageRole", "01")) lang_node.append(text_node("LanguageCode", lang)) if locale: lang_node.append(text_node("CountryCode", locale)) for subject in work.subjects.all(): - subj_node = etree.SubElement(descriptive_node, "Subject") + subj_node = sub_element(descriptive_node, "Subject") if subject.authority == 'lcsh': subj_node.append(text_node("SubjectSchemeIdentifier", "04")) - subj_node.append(text_node("SubjectHeadingText", subject.name)) + subj_node.append(text_node("SubjectHeadingText", subject.name)) elif subject.authority == 'lcc': subj_node.append(text_node("SubjectSchemeIdentifier", "03")) subj_node.append(text_node("SubjectCode", subject.name)) - elif subject.authority == 'bisacsh': + elif subject.authority == 'bisacsh': 
subj_node.append(text_node("SubjectSchemeIdentifier", "10")) subj_node.append(text_node("SubjectCode", bisac.code(subject.name))) - subj_node.append(text_node("SubjectHeadingText", subject.name)) + subj_node.append(text_node("SubjectHeadingText", subject.name)) else: subj_node.append(text_node("SubjectSchemeIdentifier", "20")) - subj_node.append(text_node("SubjectHeadingText", subject.name)) + subj_node.append(text_node("SubjectHeadingText", subject.name)) # audience range composite if work.age_level: range_match = re.search(r'(\d?\d?)-(\d?\d?)', work.age_level) if range_match: - audience_range_node = etree.SubElement(descriptive_node, "AudienceRange") - audience_range_node.append(text_node("AudienceRangeQualifier", "17")) #Interest age, years + audience_range_node = sub_element(descriptive_node, "AudienceRange") + #Interest age, years + audience_range_node.append(text_node("AudienceRangeQualifier", "17")) if range_match.group(1): audience_range_node.append(text_node("AudienceRangePrecision", "03")) #from - audience_range_node.append(text_node("AudienceRangeValue", range_match.group(1))) + audience_range_node.append(text_node("AudienceRangeValue", range_match.group(1))) if range_match.group(2): audience_range_node.append(text_node("AudienceRangePrecision", "04")) #from - audience_range_node.append(text_node("AudienceRangeValue", range_match.group(2))) - + audience_range_node.append(text_node("AudienceRangeValue", range_match.group(2))) + # Collateral Detail Block - coll_node = etree.SubElement(product_node, "CollateralDetail") - desc_node = etree.SubElement(coll_node, "TextContent") + coll_node = sub_element(product_node, "CollateralDetail") + desc_node = sub_element(coll_node, "TextContent") desc_node.append(text_node("TextType", '03')) # description desc_node.append(text_node("ContentAudience", '00')) #unrestricted - desc = (work.description if work.description else '') + '

Listed by Unglue.it.' % work.id - try : - content = etree.XML("
" + desc + "
") - content_node = etree.SubElement(desc_node, "Text", attrib={"textformat":"05"}) #xhtml - content_node.append(content) - except etree.XMLSyntaxError: - content_node = etree.SubElement(desc_node, "Text", attrib={"textformat":"02"}) #html - content_node.text = etree.CDATA(desc) - supp_node = etree.SubElement(coll_node, "SupportingResource") + desc = (work.description if work.description else '') + \ + '

Listed by Unglue.it.' % work.id + content = BeautifulSoup('
' + desc + '
', 'lxml') + content_node = sub_element(desc_node, "Text", attrib={"textformat":"05"}) #xhtml + content_node.append(content.body.div) + supp_node = sub_element(coll_node, "SupportingResource") supp_node.append(text_node("ResourceContentType", '01')) #front cover supp_node.append(text_node("ContentAudience", '00')) #unrestricted supp_node.append(text_node("ResourceMode", '03')) #image - cover_node = etree.SubElement(supp_node, "ResourceVersion") + cover_node = sub_element(supp_node, "ResourceVersion") cover_node.append(text_node("ResourceForm", '01')) #linkable - coverfeat_node = etree.SubElement(cover_node, "ResourceVersionFeature") + coverfeat_node = sub_element(cover_node, "ResourceVersionFeature") coverfeat_node.append(text_node("ResourceVersionFeatureType", '01')) #image format coverfeat_node.append(text_node("FeatureValue", 'D502')) #jpeg cover_node.append(text_node("ResourceLink", edition.cover_image_thumbnail())) #link # Publishing Detail Block - pubdetail_node = etree.SubElement(product_node, "PublishingDetail") + pubdetail_node = sub_element(product_node, "PublishingDetail") if edition.publisher_name: - pub_node = etree.SubElement(pubdetail_node, "Publisher") + pub_node = sub_element(pubdetail_node, "Publisher") pub_node.append(text_node("PublishingRole", '01')) #publisher pub_node.append(text_node("PublisherName", edition.publisher_name.name)) pubdetail_node.append(text_node("PublishingStatus", '00')) #unspecified - + #consumers really want a pub date - publication_date = edition.publication_date if edition.publication_date else edition.work.earliest_publication_date + publication_date = edition.publication_date if edition.publication_date else \ + edition.work.earliest_publication_date if publication_date: - pubdate_node = etree.SubElement(pubdetail_node, "PublishingDate") + pubdate_node = sub_element(pubdetail_node, "PublishingDate") pubdate_node.append(text_node("PublishingDateRole", '01')) #nominal pub date - pubdate_node.append(text_node("Date", 
publication_date.replace('-',''))) - + pubdate_node.append(text_node("Date", publication_date.replace('-', ''))) + # Product Supply Block - supply_node = etree.SubElement(product_node,"ProductSupply") - market_node = etree.SubElement(supply_node,"Market") - terr_node = etree.SubElement(market_node,"Territory") + supply_node = sub_element(product_node, "ProductSupply") + market_node = sub_element(supply_node, "Market") + terr_node = sub_element(market_node, "Territory") terr_node.append(text_node("RegionsIncluded", 'WORLD')) - supply_detail_node = etree.SubElement(supply_node,"SupplyDetail") - supplier_node = etree.SubElement(supply_detail_node,"Supplier") + supply_detail_node = sub_element(supply_node, "SupplyDetail") + supplier_node = sub_element(supply_detail_node, "Supplier") supplier_node.append(text_node("SupplierRole", '11')) #non-exclusive distributer supplier_node.append(text_node("SupplierName", 'Unglue.it')) #non-exclusive distributer for ebook in latest_ebooks: - website_node = etree.SubElement(supplier_node,"Website") + website_node = sub_element(supplier_node, "Website") website_node.append(text_node("WebsiteRole", '29')) #full content - website_node.append(text_node("WebsiteDescription", '%s file download' % ebook.format, attrib={'textformat':'06'})) #full content + #full content + website_node.append(text_node("WebsiteDescription", + '%s file download' % ebook.format, + attrib={'textformat':'06'})) website_node.append(text_node("WebsiteLink", ebook.download_url)) #full content supply_detail_node.append(text_node("ProductAvailability", '20')) #Available - price_node = etree.SubElement(supply_detail_node,"Price") + price_node = sub_element(supply_detail_node, "Price") price_node.append(text_node("PriceType", '01')) #retail excluding tax price_node.append(text_node("PriceAmount", '0.00')) #retail excluding tax price_node.append(text_node("CurrencyCode", 'USD')) #retail excluding tax return product_node - \ No newline at end of file diff --git 
a/api/opds.py b/api/opds.py index 907f0efd5..7b514b561 100644 --- a/api/opds.py +++ b/api/opds.py @@ -1,32 +1,36 @@ +import datetime from itertools import islice +import logging +from urllib.parse import urlparse, urlunparse -from lxml import etree -import datetime -import urlparse -from django.core.urlresolvers import reverse +from bs4 import BeautifulSoup +import pytz + +from django.core.cache import cache +from django.urls import reverse from django.utils.http import urlquote -import pytz -import logging -logger = logging.getLogger(__name__) from regluit.core import models, facets import regluit.core.cc as cc licenses = cc.LICENSE_LIST +logger = logging.getLogger(__name__) +soup = None FORMAT_TO_MIMETYPE = {'pdf':"application/pdf", 'epub':"application/epub+zip", 'mobi':"application/x-mobipocket-ebook", 'html':"text/html", 'text':"text/html"} -UNGLUEIT_URL= 'https://unglue.it' -ACQUISITION = "application/atom+xml;profile=opds-catalog;kind=acquisition" +UNGLUEIT_URL = 'https://unglue.it' +ACQUISITION = "application/atom+xml; profile=opds-catalog ;kind=acquisition; charset=utf-8" +NAVIGATION = "application/atom+xml; profile=opds-catalog; kind=navigation; charset=utf-8" FACET_RELATION = "http://opds-spec.org/facet" -old_facets= ["creative_commons","active_campaigns"] +old_facets = ["creative_commons", "active_campaigns"] def feeds(): @@ -40,50 +44,56 @@ def feeds(): def get_facet_class(name): if name in old_facets: return globals()[name] - else: - return get_facet_facet(name) - - + return get_facet_facet(name) + + def text_node(tag, text): - node = etree.Element(tag) - node.text = text + node = soup.new_tag(tag) + if text: + node.string = text return node def html_node(tag, html): node = text_node(tag, html) - node.attrib.update({"{http://www.w3.org/2005/Atom}type":'html'}) + node.attrs.update({"type":'html'}) return node - + def add_query_component(url, qc): """ add component qc to the querystring of url """ - m = list(urlparse.urlparse(url)) - if len(m[4]): - m[4] 
= "&".join([m[4],qc]) + m = list(urlparse(url)) + if m[4]: + m[4] = "&".join([m[4], qc]) else: m[4] = qc - return urlparse.urlunparse(m) + return urlunparse(m) def isbn_node(isbn): - node = etree.Element("{http://purl.org/dc/terms/}identifier") - node.attrib.update({"{http://www.w3.org/2001/XMLSchema-instance}type":'dcterms:URI'}) - node.text = 'urn:ISBN:'+ isbn + node = soup.new_tag("dcterms:identifier") + node.attrs.update({"xsi:type":'dcterms:URI'}) + node.string = 'urn:ISBN:'+ isbn return node def work_node(work, facet=None): - - node = etree.Element("entry") + + node = soup.new_tag("entry") # title node.append(text_node("title", work.title)) - + # id - node.append(text_node('id', "{base}{url}".format(base=UNGLUEIT_URL,url=reverse('work_identifier',kwargs={'work_id':work.id})))) - + node.append(text_node( + 'id', + "{base}{url}".format( + base=UNGLUEIT_URL, + url=reverse('work_identifier', kwargs={'work_id': work.id}) + ) + )) + updated = None - + # links for all ebooks - ebooks = facet.filter_model("Ebook",work.ebooks()) if facet else work.ebooks() + ebooks = facet.filter_model("Ebook", work.ebooks()) if facet else work.ebooks() versions = set() for ebook in ebooks: if updated is None: @@ -92,78 +102,85 @@ def work_node(work, facet=None): node.append(text_node('updated', updated)) if not ebook.version_label in versions: versions.add(ebook.version_label) - link_node = etree.Element("link") - + link_node = soup.new_tag("link") + # ebook.download_url is an absolute URL with the protocol, domain, and path baked in - link_rel = "http://opds-spec.org/acquisition/open-access" - link_node.attrib.update({"href":add_query_component(ebook.download_url, "feed=opds"), - "rel":link_rel, - "{http://purl.org/dc/terms/}rights": str(ebook.rights)}) - if ebook.is_direct(): - link_node.attrib["type"] = FORMAT_TO_MIMETYPE.get(ebook.format, "") + link_rel = "http://opds-spec.org/acquisition/open-access" + link_node.attrs.update({ + "href":add_query_component(ebook.download_url, 
"feed=opds"), + "rel":link_rel, + "dcterms:rights": str(ebook.rights) + }) + if ebook.is_direct(): + link_node["type"] = FORMAT_TO_MIMETYPE.get(ebook.format, "") else: - """ indirect acquisition, i.e. google books """ - link_node.attrib["type"] = "text/html" - indirect = etree.Element("{http://opds-spec.org/}indirectAcquisition",) - indirect.attrib["type"] = FORMAT_TO_MIMETYPE.get(ebook.format, "") + # indirect acquisition, i.e. google books + link_node["type"] = "text/html" + indirect = soup.new_tag("opds:indirectAcquisition",) + indirect["type"] = FORMAT_TO_MIMETYPE.get(ebook.format, "") link_node.append(indirect) if ebook.version_label: - link_node.attrib.update({"{http://schema.org/}version": ebook.version_label}) + link_node.attrs.update({"schema:version": ebook.version_label}) node.append(link_node) - + # get the cover -- assume jpg? - - cover_node = etree.Element("link") - cover_node.attrib.update({"href":work.cover_image_small(), - "type":"image/"+work.cover_filetype(), - "rel":"http://opds-spec.org/image/thumbnail"}) + + cover_node = soup.new_tag("link") + cover_node.attrs.update({ + "href": work.cover_image_small(), + "type": "image/" + work.cover_filetype(), + "rel": "http://opds-spec.org/image/thumbnail" + }) node.append(cover_node) - cover_node = etree.Element("link") - cover_node.attrib.update({"href":work.cover_image_thumbnail(), - "type":"image/"+work.cover_filetype(), - "rel":"http://opds-spec.org/image"}) + cover_node = soup.new_tag("link") + cover_node.attrs.update({ + "href": work.cover_image_thumbnail(), + "type": "image/" + work.cover_filetype(), + "rel": "http://opds-spec.org/image" + }) node.append(cover_node) - - + + # 2012 - node.append(text_node("{http://purl.org/dc/terms/}issued", work.publication_date)) - + node.append(text_node("dcterms:issued", work.publication_date)) + # author # TO DO: include all authors? 
- author_node = etree.Element("author") + author_node = soup.new_tag("author") author_node.append(text_node("name", work.author())) node.append(author_node) - + # publisher #Open Book Publishers - if len(work.publishers()): + if work.publishers().exists(): for publisher in work.publishers(): - node.append(text_node("{http://purl.org/dc/terms/}publisher", publisher.name.name)) - + node.append(text_node("dcterms:publisher", publisher.name.name)) + # language #en - node.append(text_node("{http://purl.org/dc/terms/}language", work.language)) - + node.append(text_node("dcterms:language", work.language)) + # description - node.append(html_node("{http://www.w3.org/2005/Atom}content", work.description)) - + node.append(html_node("content", work.description)) + # identifiers if work.identifiers.filter(type='isbn'): for isbn in work.identifiers.filter(type='isbn')[0:9]: #10 should be more than enough node.append(isbn_node(isbn.value)) - + # subject tags # [[subject.name for subject in work.subjects.all()] for work in ccworks if work.subjects.all()] for subject in work.subjects.all(): if subject.is_visible: - category_node = etree.Element("category") + category_node = soup.new_tag("category") try: - category_node.attrib["term"] = subject.name + category_node["term"] = subject.name node.append(category_node) try: subject.works.filter(is_free=True)[1] # only show feed if there's another work in it - append_navlink(node, 'related', 'kw.'+ subject.name , 0, 'popular', title=subject.name) + node.append(navlink('related', 'kw.' 
+ subject.name, 0, + 'popular', title=subject.name)) except: pass except ValueError: @@ -172,48 +189,53 @@ def work_node(work, facet=None): subject.delete() # age level - # + # if work.age_level: - category_node = etree.Element("category") - category_node.attrib["scheme"] = 'http://schema.org/typicalAgeRange' - category_node.attrib["term"] = work.age_level - category_node.attrib["label"] = work.get_age_level_display() + category_node = soup.new_tag("category") + category_node["scheme"] = 'http://schema.org/typicalAgeRange' + category_node["term"] = work.age_level + category_node["label"] = work.get_age_level_display() node.append(category_node) - - - # rating - rating_node = etree.Element("{http://schema.org/}Rating") - rating_node.attrib.update({"{http://schema.org/}ratingValue":"{:}".format(work.priority())}) + + + # rating + rating_node = soup.new_tag("schema:Rating") + rating_node.attrs.update({"schema:ratingValue":"{:}".format(work.priority())}) node.append(rating_node) return node class Facet: title = '' - works = None + works = models.Work.objects.none() feed_path = '' description = '' - + def feed(self, page=None, order_by='newest'): self.works = self.works.order_by(*facets.get_order_by(order_by)) return opds_feed_for_works(self, page=page, order_by=order_by) - + def updated(self): # return the creation date for most recently added item - if not self.works: + key = f"{self.feed_path.replace(' ', '_')}_updated" + if not self.works.exists(): return pytz.utc.localize(datetime.datetime.utcnow()).isoformat() - else: - return pytz.utc.localize(self.works[0].created).isoformat() + value = cache.get(key) + if value is None: + value = pytz.utc.localize(self.works.latest('created').created).isoformat() + cache.set(key, value, 100000) + return value def get_facet_facet(facet_path): class Facet_Facet(Facet): - + def __init__(self, facet_path=facet_path): self.feed_path = facet_path self.facet_object = facets.get_facet_object(facet_path) self.title = "Unglue.it" for 
facet in self.facet_object.facets(): self.title = self.title + " " + facet.title - self.works = self.facet_object.get_query_set().distinct() + self.works = self.facet_object.get_query_set() self.description = self.facet_object.description return Facet_Facet @@ -221,11 +243,14 @@ class creative_commons(Facet): def __init__(self): self.title = "Unglue.it Catalog: Creative Commons Books" self.feed_path = "creative_commons" - self.works = models.Work.objects.filter(editions__ebooks__isnull=False, - editions__ebooks__rights__in=cc.LICENSE_LIST).distinct() - self.description= "These Creative Commons licensed ebooks are free to read - the people who created them want you to read and share them." + self.works = models.Work.objects.filter( + editions__ebooks__isnull=False, + editions__ebooks__rights__in=cc.LICENSE_LIST + ) + self.description = """These Creative Commons licensed ebooks are free to read - the people + who created them want you to read and share them.""" self.facet_object = facets.get_facet_object(self.feed_path) - + class active_campaigns(Facet): """ return opds feed for works associated with active campaigns @@ -233,115 +258,136 @@ class active_campaigns(Facet): def __init__(self): self.title = "Unglue.it Catalog: Books under Active Campaign" self.feed_path = "active_campaigns" - self.works = models.Work.objects.filter(campaigns__status='ACTIVE', is_free = True) - self.description= "With your help we're raising money to make these books free to the world." 
+ self.works = models.Work.objects.filter(campaigns__status='ACTIVE', is_free=True) + self.description = """With your help we're raising money + to make these books free to the world.""" self.facet_object = facets.get_facet_object(self.feed_path) def opds_feed_for_work(work_id): class single_work_facet: def __init__(self, work_id): try: - works=models.Work.objects.filter(id=work_id) + works = models.Work.objects.filter(id=work_id) except models.Work.DoesNotExist: - works=models.Work.objects.none() + works = models.Work.objects.none() except ValueError: # not a valid work_id - works=models.Work.objects.none() - self.works=works - self.title='Unglue.it work #%s' % work_id - self.feed_path='' - self.facet_object= facets.BaseFacet(None) - return opds_feed_for_works( single_work_facet(work_id) ) + works = models.Work.objects.none() + self.works = works + self.title = 'Unglue.it work #%s' % work_id + self.feed_path = '' + self.facet_object = facets.BaseFacet(None) + return opds_feed_for_works(single_work_facet(work_id)) def opds_feed_for_works(the_facet, page=None, order_by='newest'): - works = the_facet.works + global soup + if not soup: + soup = BeautifulSoup('', 'lxml') + works = the_facet.works.distinct() feed_path = the_facet.feed_path title = the_facet.title - feed_xml = """ + """ - - feed = etree.fromstring(feed_xml) - + xsi:schemaLocation="http://purl.org/dc/elements/1.1/ http://dublincore.org/schemas/xmls/qdc/2008/02/11/dc.xsd + http://purl.org/dc/terms/ http://dublincore.org/schemas/xmls/qdc/2008/02/11/dcterms.xsd"> + """ + + yield feed_header + # add title # TO DO: will need to calculate the number items and where in the feed we are - - feed.append(text_node('title', title + ' - sorted by ' + order_by)) - - # id - - feed.append(text_node('id', "{url}/api/opds/{feed_path}/?order_by={order_by}".format(url=UNGLUEIT_URL, - feed_path=urlquote(feed_path), order_by=order_by))) - + + yield text_node('title', title + ' - sorted by ' + order_by).prettify() + + # id + + 
feed = text_node( + 'id', + "{url}/api/opds/{feed_path}/?order_by={order_by}".format( + url=UNGLUEIT_URL, + feed_path=urlquote(feed_path), + order_by=order_by, + ), + ) + yield feed.prettify() + # updated # TO DO: fix time zone? # also use our wrapped datetime code - - feed.append(text_node('updated', - pytz.utc.localize(datetime.datetime.utcnow()).isoformat())) - + + feed = text_node('updated', pytz.utc.localize(datetime.datetime.utcnow()).isoformat()) + yield feed.prettify() + # author - - author_node = etree.Element("author") + + author_node = soup.new_tag("author") author_node.append(text_node('name', 'unglue.it')) author_node.append(text_node('uri', UNGLUEIT_URL)) - feed.append(author_node) - + yield author_node.prettify() + # links: start, self, next/prev (depending what's necessary -- to start with put all CC books) - + # start link - append_navlink(feed, 'start', feed_path, None , order_by, title="First 10") - + yield navlink('start', feed_path, None, order_by, title="First 10").prettify() + # next link - + if not page: - page =0 + page = 0 else: try: - page=int(page) + page = int(page) except TypeError: - page=0 - + page = 0 + try: works[10 * page + 10] - append_navlink(feed, 'next', feed_path, page+1 , order_by, title="Next 10") + yield navlink('next', feed_path, page+1, order_by, title="Next 10").prettify() except IndexError: pass - + # sort facets - append_navlink(feed, FACET_RELATION, feed_path, None, 'popular', group="Order", active = order_by=='popular', title="Sorted by popularity") - append_navlink(feed, FACET_RELATION, feed_path, None, 'newest', group="Order", active = order_by=='newest', title="Sorted by newest") - + yield navlink(FACET_RELATION, feed_path, None, 'popular', group="Order", + active=order_by == 'popular', title="Sorted by popularity").prettify() + yield navlink(FACET_RELATION, feed_path, None, 'newest', group="Order", + active=order_by == 'newest', title="Sorted by newest").prettify() + #other facets if feed_path not in old_facets: 
for other_group in the_facet.facet_object.get_other_groups(): for facet_object in other_group.get_facets(): - append_navlink(feed, FACET_RELATION, feed_path + '/' + facet_object.facet_name, None, order_by, group=other_group.title, title=facet_object.title) - - works = islice(works, 10 * page, 10 * page + 10) + yield navlink(FACET_RELATION, feed_path + '/' + facet_object.facet_name, + None, order_by, group=other_group.title, + title=facet_object.title).prettify() + + works = islice(works, 10 * page, 10 * page + 10) if page > 0: - append_navlink(feed, 'previous', feed_path, page-1, order_by, title="Previous 10") + yield navlink('previous', feed_path, page-1, order_by, title="Previous 10").prettify() + for work in works: - node = work_node(work, facet=the_facet.facet_object) - feed.append(node) - - return etree.tostring(feed, pretty_print=True) - -def append_navlink(feed, rel, path, page, order_by, group=None, active=None , title=""): - link = etree.Element("link") - link.attrib.update({"rel":rel, - "href": UNGLUEIT_URL + "/api/opds/" + urlquote(path) + '/?order_by=' + order_by + ('&page=' + unicode(page) if page!=None else ''), - "type": ACQUISITION, - "title": title, - }) + yield work_node(work, facet=the_facet.facet_object).prettify() + + yield ''' +''' + +def navlink(rel, path, page, order_by, group=None, active=None, title=""): + link = soup.new_tag("link") + link.attrs.update({ + "rel":rel, + "href": UNGLUEIT_URL + "/api/opds/" + urlquote(path) + '/?order_by=' + order_by + ( + '&page=' + str(page) if page is not None else '' + ), + "type": ACQUISITION, + "title": title, + }) if rel == FACET_RELATION: if group: - link.attrib['{http://opds-spec.org/}facetGroup'] = group + link['opds:facetGroup'] = group if active: - link.attrib['{http://opds-spec.org/}activeFacet'] = 'true' - feed.append(link) \ No newline at end of file + link['opds:activeFacet'] = 'true' + return link diff --git a/api/opds_json.py b/api/opds_json.py index 59e218029..4007b99ae 100644 --- 
a/api/opds_json.py +++ b/api/opds_json.py @@ -1,26 +1,22 @@ -from itertools import islice - import datetime -import urlparse -from django.core.urlresolvers import reverse -from django.utils.http import urlquote +from itertools import islice +import logging import json + import pytz -import logging -logger = logging.getLogger(__name__) +from django.urls import reverse +from django.utils.http import urlquote from regluit.core import models, facets import regluit.core.cc as cc + from .opds import ( - feeds, - get_facet_class, add_query_component, - Facet, - get_facet_facet, - opds_feed_for_work, ) +logger = logging.getLogger(__name__) + licenses = cc.LICENSE_LIST FORMAT_TO_MIMETYPE = {'pdf':"application/pdf", @@ -29,7 +25,7 @@ 'html':"text/html", 'text':"text/html"} -UNGLUEIT_URL= 'https://unglue.it' +UNGLUEIT_URL = 'https://unglue.it' ACQUISITION = "application/opds+json" FACET_RELATION = "opds:facet" JSONCONTEXT = "http://opds-spec.org/opds.jsonld" @@ -42,24 +38,22 @@ def feeds(): def get_facet_class(name): return get_facet_facet(name) - + def text_node(tag, text): return {tag:text} def html_node(tag, html): return {tag:html} - + def isbn_node(isbn): return 'urn:ISBN:'+ isbn def work_node(work, facet=None): - - - metadata = {"@type": "http://schema.org/EBook", + metadata = { + "@type": "http://schema.org/EBook", "id": "{base}{url}".format( base=UNGLUEIT_URL, - url=reverse('work_identifier', - kwargs={'work_id':work.id}) + url=reverse('work_identifier', kwargs={'work_id':work.id}) ) } links = [] @@ -73,7 +67,7 @@ def work_node(work, facet=None): } # title metadata["title"] = work.title - + # id links.append({ "rel": "self", @@ -84,18 +78,18 @@ def work_node(work, facet=None): ), "type": "application/opds-publication+json" }) - + updated = None - + # links for all ebooks - ebooks = facet.filter_model("Ebook",work.ebooks()) if facet else work.ebooks() + ebooks = facet.filter_model("Ebook", work.ebooks()) if facet else work.ebooks() versions = set() for ebook in ebooks: 
if updated is None: # most recent ebook, first ebook in loop updated = ebook.created.isoformat() - metadata['updated'] = updated + metadata['updated'] = updated if not ebook.version_label in versions: versions.add(ebook.version_label) # ebook.download_url is an absolute URL with the protocol, domain, and path baked in @@ -104,19 +98,19 @@ def work_node(work, facet=None): "href": add_query_component(ebook.download_url, "feed=opds"), "rights": str(ebook.rights) } - if ebook.is_direct(): + if ebook.is_direct(): acquire["type"] = FORMAT_TO_MIMETYPE.get(ebook.format, "") else: - """ indirect acquisition, i.e. google books """ + # indirect acquisition, i.e. google books acquire["type"] = "text/html" acquire["indirectAcquisition"] = { "type": FORMAT_TO_MIMETYPE.get(ebook.format) } if ebook.version_label: acquire["version"] = ebook.version_label - + acquires.append(acquire) - + # get the cover -- assume jpg? if work.cover_image_small(): cover_node = { @@ -130,80 +124,85 @@ def work_node(work, facet=None): "type": "image/"+work.cover_filetype(), } images.append(cover_node2) - - + + # 2012 metadata["issued"] = work.publication_date - + # author # TO DO: include all authors? 
- metadata["author"] = work.author() - + metadata["author"] = work.author() + # publisher #Open Book Publishers - if len(work.publishers()): - metadata["publishers"] = [{"publisher": publisher.name.name} - for publisher in work.publishers()] - + if work.publishers().exists(): + metadata["publishers"] = [ + {"publisher": publisher.name.name} for publisher in work.publishers() + ] # language metadata["language"] = work.language - + # description metadata["summary"] = work.description - + # identifiers if work.identifiers.filter(type='isbn'): - metadata['identifiers'] = [isbn_node(isbn.value) - for isbn in work.identifiers.filter(type='isbn')[0:9]] #10 should be more than enough + metadata['identifiers'] = [ + isbn_node(isbn.value) for isbn in work.identifiers.filter(type='isbn')[0:9] + ] # 10 should be more than enough - # subject tags - subjects = [subject.name for subject in work.subjects.all()] + subjects = [subject.name for subject in work.subjects.all()] if subjects: metadata["subjects"] = subjects # age level - # + # if work.age_level: age_level_node_attrib = {} age_level_node = {"category": age_level_node_attrib} - age_level_node_attrib["scheme"] = 'http://schema.org/typicalAgeRange' - age_level_node_attrib["term"] = work.age_level - age_level_node_attrib["label"] = work.get_age_level_display() + age_level_node_attrib["scheme"] = 'http://schema.org/typicalAgeRange' + age_level_node_attrib["term"] = work.age_level + age_level_node_attrib["label"] = work.get_age_level_display() metadata.update(age_level_node) - - - # rating + + + # rating metadata["rating"] = {"ratingValue":"{:}".format(work.priority())} return content class Facet: title = '' - works = None + works = models.Work.objects.none() feed_path = '' description = '' - + def feed(self, page=None, order_by='newest'): self.works = self.works.order_by(*facets.get_order_by(order_by)) return opds_feed_for_works(self, page=page, order_by=order_by) - + def updated(self): # return the creation date for most 
recently added item - if not self.works: + key = f"{self.feed_path.replace(' ', '_')}_updated" + if not self.works.exists(): return pytz.utc.localize(datetime.datetime.utcnow()).isoformat() - else: - return pytz.utc.localize(self.works[0].created).isoformat() + value = cache.get(key) + if value is None: + value = pytz.utc.localize(self.works.latest('created').created).isoformat() + cache.set(key, value, 100000) + return value def get_facet_facet(facet_path): class Facet_Facet(Facet): - + def __init__(self, facet_path=facet_path): self.feed_path = facet_path self.facet_object = facets.get_facet_object(facet_path) self.title = "Unglue.it" for facet in self.facet_object.facets(): self.title = self.title + " " + facet.title - self.works = self.facet_object.get_query_set().distinct() + self.works = self.facet_object.get_query_set() self.description = self.facet_object.description return Facet_Facet @@ -214,38 +213,37 @@ class NullFacet(facets.BaseFacet): def get_other_groups(self): return[] try: - works=models.Work.objects.filter(id=work_id) + works = models.Work.objects.filter(id=work_id) except models.Work.DoesNotExist: - works=models.Work.objects.none() + works = models.Work.objects.none() except ValueError: # not a valid work_id - works=models.Work.objects.none() - self.works=works - self.title='Unglue.it work #%s' % work_id - self.feed_path='' - self.facet_object= NullFacet(None) - return opds_feed_for_works( single_work_facet(work_id) ) + works = models.Work.objects.none() + self.works = works + self.title = 'Unglue.it work #%s' % work_id + self.feed_path = '' + self.facet_object = NullFacet(None) + return opds_feed_for_works(single_work_facet(work_id)) def opds_feed_for_works(the_facet, page=None, order_by='newest'): if order_by == 'none': books_per_page = 50000 + order_by = 'newest' else: books_per_page = 50 - works = the_facet.works + works = the_facet.works.distinct() feed_path = the_facet.feed_path title = the_facet.title metadata = {"title": title} links = 
[] - feedlist = [] - feed = {"@context": JSONCONTEXT, "metadata": metadata, "links": links, "publications": feedlist} - + # add title # TO DO: will need to calculate the number items and where in the feed we are - + metadata['title'] = title + ' - sorted by ' + order_by - + # links: start, self, next/prev (depending what's necessary -- to start with put all CC books) - + if not page: page = 0 else: @@ -255,37 +253,55 @@ def opds_feed_for_works(the_facet, page=None, order_by='newest'): page = 0 # self link - append_navlink(feed, 'self', feed_path, page , order_by, title="First {}".format(books_per_page)) - - # next link + append_navlink(links, 'self', feed_path, page, order_by, + title="First {}".format(books_per_page)) + + # next link try: works[books_per_page * page + books_per_page] - append_navlink(feed, 'next', feed_path, page+1 , order_by, - title="Next {}".format(books_per_page)) + append_navlink(links, 'next', feed_path, page+1, order_by, + title="Next {}".format(books_per_page)) except IndexError: pass - + # sort facets - append_navlink(feed, FACET_RELATION, feed_path, None, 'popular', group="Order", active = order_by=='popular', title="Sorted by popularity") - append_navlink(feed, FACET_RELATION, feed_path, None, 'newest', group="Order", active = order_by=='newest', title="Sorted by newest") - + append_navlink(links, FACET_RELATION, feed_path, None, 'popular', group="Order", + active=order_by == 'popular', title="Sorted by popularity") + append_navlink(links, FACET_RELATION, feed_path, None, 'newest', group="Order", + active=order_by == 'newest', title="Sorted by newest") + #other facets for other_group in the_facet.facet_object.get_other_groups(): for facet_object in other_group.get_facets(): - append_navlink(feed, FACET_RELATION, feed_path + '/' + facet_object.facet_name, None, order_by, group=other_group.title, title=facet_object.title) - - works = islice(works, books_per_page * page, books_per_page * page + books_per_page) + append_navlink( + links, 
FACET_RELATION, + feed_path + '/' + facet_object.facet_name, None, order_by, + group=other_group.title, title=facet_object.title + ) + + works = islice(works, books_per_page * page, books_per_page * page + books_per_page) if page > 0: - append_navlink(feed, 'previous', feed_path, page-1, order_by, title="Previous {}".format(books_per_page)) + append_navlink(links, 'previous', feed_path, page-1, order_by, + title="Previous {}".format(books_per_page)) + + yield '{' + f""" +"@context": {JSONCONTEXT}, +"metadata": {json.dumps(metadata, indent=2,)}, +"links": {json.dumps(links, indent=2,)}, +"publications": +[ +""" + for work in works: node = work_node(work, facet=the_facet.facet_object) - feedlist.append(node) - return json.dumps(feed,indent=2, separators=(',', ': '), sort_keys=False) + yield json.dumps(node, indent=2) + ',\r' + yield '\r]\r}' -def append_navlink(feed, rel, path, page, order_by, group=None, active=None , title=""): - link = { +def append_navlink(links, rel, path, page, order_by, group=None, active=None, title=""): + link = { "rel": rel, - "href": UNGLUEIT_URL + "/api/opdsjson/" + urlquote(path) + '/?order_by=' + order_by + ('&page=' + unicode(page) ), + "href": UNGLUEIT_URL + "/api/opdsjson/" + urlquote(path) + + '/?order_by=' + order_by + '&page=' + str(page), "type": ACQUISITION, "title": title, } @@ -294,4 +310,4 @@ def append_navlink(feed, rel, path, page, order_by, group=None, active=None , ti link['facetGroup'] = group if active: link['activeFacet'] = 'true' - feed['links'].append(link) \ No newline at end of file + links.append(link) diff --git a/api/resources.py b/api/resources.py index bab1fc0db..8dc250349 100755 --- a/api/resources.py +++ b/api/resources.py @@ -10,7 +10,7 @@ from django.conf.urls import url from django.contrib import auth from django.contrib.auth.models import User -from django.core.urlresolvers import reverse +from django.urls import reverse from regluit.core import models import regluit.core.isbn @@ -20,7 +20,7 @@ class 
EditionResource(ModelResource): work = fields.ForeignKey('regluit.api.resources.WorkResource', 'work') - identifiers = fields.ToManyField('regluit.api.resources.IdentifierResource', 'identifiers') + identifiers = fields.ToManyField('regluit.api.resources.IdentifierResource', 'identifiers', full=True) ebooks = fields.ToManyField('regluit.api.resources.EbookResource', 'ebooks') class Meta: authentication = ApiKeyAuthentication() @@ -45,10 +45,11 @@ def build_filters(self, filters = None, **kwargs): class IdentifierResource(ModelResource): work = fields.ForeignKey('regluit.api.resources.WorkResource', 'work') - edition = fields.ForeignKey('regluit.api.resources.EditionResource', 'edition') + edition = fields.ForeignKey('regluit.api.resources.EditionResource', 'edition', null=True) class Meta: authentication = ApiKeyAuthentication() + include_resource_uri = False queryset = models.Identifier.objects.all() resource_name = 'identifier' filtering = { @@ -59,7 +60,7 @@ class Meta: class WorkResource(ModelResource): editions = fields.ToManyField(EditionResource, 'editions') - identifiers = fields.ToManyField(IdentifierResource, 'identifiers') + identifiers = fields.ToManyField(IdentifierResource, 'identifiers', full=True) class Meta: authentication = ApiKeyAuthentication() @@ -170,6 +171,7 @@ def obj_get_list(self, bundle, **kwargs): return models.Ebook.objects.none() class Meta: + queryset = models.Ebook.objects.all() authentication = ApiKeyAuthentication() fields = [ 'provider', 'rights' ] limit = 0 diff --git a/api/templates/api_help.html b/api/templates/api_help.html index 97fd8239d..7189f0bba 100644 --- a/api/templates/api_help.html +++ b/api/templates/api_help.html @@ -55,45 +55,65 @@

Free Ebooks by ISBN

{% else %} (Log in to see links) {% endif %}

+

Identifier Resolution

-

Here's how to get work/edition data for an isbn

+

Here's how to use the api to find related identifiers:

+ + + + + {% if user.is_authenticated %}

Campaign info

Here's how to get data on all campaigns. If the user is logged in to Unglue.it, they can tell if the campaign book is on their fave list

-

JSON: {{base_url}}/api/v1/campaign/?format=json&api_key={your_api_key}&username={your_username}<
XML: {{base_url}}/api/v1/campaign/?format=json&api_key={your_api_key}&username={your_username}

-

Identifier Resolution

-

Here's how to get work/edition data for an isbn

-

JSON: {{base_url}}/api/v1/identifier/?format=json&api_key={your_api_key}&username={your_username}&type=isbn&value=9780441012039
- XML: {{base_url}}/api/v1/identifier/?format=xml&api_key={your_api_key}&username={your_username}&type=isbn&value=9780441012039

-

In addition to isbn, you can use 'goog' if you have a google books id, and 'oclc' for oclc numbers.

- {% endif %} - +

JSON: {{base_url}}/api/v1/campaign/?format=json&api_key={your_api_key}&username={your_username}
XML: {{base_url}}/api/v1/campaign/?format=json&api_key={your_api_key}&username={your_username}

+ {% endif %}

OPDS Catalog Feeds

-

We have a basic implementation of OPDS feeds. You don't need a key to use them. The starting point is {{base_url}}{% url 'opds' %}

+

We have a basic implementation of OPDS feeds. You don't need a key to use them. The starting point is {{base_url}}{% url 'opds' %}. Use the page parameter to page through the results.

Examples:

filtered by format
-
{{base_url}}{% url 'opds_acqusition' 'epub' %}
+
{{base_url}}{% url 'opds_acqusition' 'epub' %}
filtered by license
-
{{base_url}}{% url 'opds_acqusition' 'by-sa' %}
+
{{base_url}}{% url 'opds_acqusition' 'by-sa' %}
filtered by title search
-
{{base_url}}{% url 'opds_acqusition' 's.open' %}
+
{{base_url}}{% url 'opds_acqusition' 's.open' %}
filtered by keyword
-
{{base_url}}{% url 'opds_acqusition' 'kw.fiction' %}
+
{{base_url}}{% url 'opds_acqusition' 'kw.fiction' %}
filtered by ungluer
-
{{base_url}}{% url 'opds_acqusition' '@eric' %}
+
{{base_url}}{% url 'opds_acqusition' '@eric' %}
+
filtered by having a Project Gutenberg or DOAB identifier (doab, gtbg)
+
{{base_url}}{% url 'opds_acqusition' 'doab/-gtbg' %}?page=1

There's also an OPDS record available for every work on unglue.it. For example, requesting, {{base_url}}{% url 'opds_acqusition' 'all'%}?work=13950 get you to the web page or opds record for A Christmas Carol.

ONIX Catalog Feeds

-

There is an ONIX 3.0 feed corresponding to every facet of our free ebook lists. You don't need a key to use them. There is a maximum of 100 books per result you can change with the max parameter. For example, here are the first hundred CC BY-ND-ND licensed books available in EPUB.

+

There is an ONIX 3.0 feed corresponding to every facet of our free ebook lists. You don't need a key to use them. There is a maximum of 100 books per result you can change with the max parameter. For example, here are the first twenty CC BY-ND-ND licensed books available in EPUB. Pages of 30 records each are available via the page parameter. Here's the first page of books from the Directory of Open Access Books.

There's also an ONIX record available for every free ebook on unglue.it. For example, here is Issues in Open Research Data.

Identifiers with Content type negotiation

diff --git a/api/templates/editions.html b/api/templates/editions.html index 71e815320..10fb0f8e0 100644 --- a/api/templates/editions.html +++ b/api/templates/editions.html @@ -8,7 +8,7 @@ - {% if editions %} + {% if editions.exists %}
    {% for edition in editions %}
  • {{edition.id}} | {{edition.title}} | @@ -22,5 +22,3 @@ - - diff --git a/api/templates/load_yaml.html b/api/templates/load_yaml.html index a66be387a..9826a3e95 100644 --- a/api/templates/load_yaml.html +++ b/api/templates/load_yaml.html @@ -14,5 +14,3 @@ - - diff --git a/api/templates/opds.json b/api/templates/opds.json index 06d209124..aca25025d 100644 --- a/api/templates/opds.json +++ b/api/templates/opds.json @@ -11,7 +11,7 @@ {"title": "{{ feed.title }} - Popular", "href": "{{ feed.feed_path|urlencode }}/?order_by=popular", "type": "application/opds+json"}, {"title": "{{ feed.title }} - New", "href": "{{ feed.feed_path|urlencode }}/?order_by=newest", "type": "application/opds+json" }, {% for feed in feeds %} - {"title": "{{ feed.title }}", "href": "{{ feed.feed_path|urlencode }}/", "type": "application/opds+json" }, + {"title": "{{ feed.title }}", "href": "{{ feed.feed_path|urlencode }}/", "type": "application/opds+json" }{% if not forloop.last %},{% endif %} {% endfor %} ] } diff --git a/api/templates/opds.xml b/api/templates/opds.xml index 30cb09304..7524c6f63 100644 --- a/api/templates/opds.xml +++ b/api/templates/opds.xml @@ -4,7 +4,7 @@ xsi:noNamespaceSchemaLocation="http://www.kbcafe.com/rss/atom.xsd.xml"> Unglue.it Catalog https://unglue.it{% url 'opds' %} - 2014-06-13T21:48:34Z + {{ feed.updated }} unglue.it https://unglue.it/ @@ -45,4 +45,4 @@ {{ feed.description }} {% endfor %} - \ No newline at end of file + diff --git a/api/templates/widget.html b/api/templates/widget.html index 657da2372..ff6f52385 100644 --- a/api/templates/widget.html +++ b/api/templates/widget.html @@ -43,5 +43,3 @@ - - diff --git a/api/tests.py b/api/tests.py index 3e5e39f7a..8464d3579 100755 --- a/api/tests.py +++ b/api/tests.py @@ -46,7 +46,7 @@ def setUp(self): def test_user(self): self.assertEqual(User.objects.all().count(), 1) - self.assertTrue(User.objects.all()[0].api_key.key) + self.assertTrue(User.objects.first().api_key.key) def test_no_auth(self): r = 
self.client.get('/api/v1/campaign/', data={'format': 'json'}) @@ -142,7 +142,7 @@ def test_logged_in_user_info(self): 'username': self.user.username, 'api_key': self.user.api_key.key }) - self.assertTrue(r.content.find('CC BY')>0) + self.assertTrue(r.content.find(b'CC BY')>0) def test_widget(self): r = self.client.get('/api/widget/0441007465/') diff --git a/api/urls.py b/api/urls.py index ec34a6a96..a5223895a 100644 --- a/api/urls.py +++ b/api/urls.py @@ -1,6 +1,6 @@ from tastypie.api import Api -from django.conf.urls import patterns, url, include +from django.conf.urls import url, include from django.views.generic.base import TemplateView from regluit.api import resources diff --git a/api/views.py b/api/views.py index 5a3c2f3de..b2e348a61 100755 --- a/api/views.py +++ b/api/views.py @@ -1,13 +1,13 @@ from tastypie.models import ApiKey -import json +import json as json_module import logging from django.contrib import auth from django.contrib.auth.models import User from django.contrib.sites.models import Site -from django.core.urlresolvers import reverse -from django.shortcuts import render, render_to_response +from django.urls import reverse +from django.shortcuts import render from django.template import RequestContext from django.views.decorators.csrf import csrf_exempt from django.views.generic.base import View, TemplateView @@ -17,25 +17,28 @@ HttpResponseBadRequest, HttpResponseRedirect, Http404, + StreamingHttpResponse, ) import regluit.core.isbn -from regluit.core.bookloader import load_from_yaml from regluit.api import opds, onix, opds_json from regluit.api.models import repo_allowed - +from regluit.core.bookloader import load_from_yaml +from regluit.core.covers import DEFAULT_COVER from regluit.core import models +from regluit.core.parameters import ORDER_BY_KEYS logger = logging.getLogger(__name__) +ANONYMOUS_MAX_RECORDS = 100 def editions(request): editions = models.Edition.objects.all() - return render(request, 'editions.html', + return render(request, 
'editions.html', {'editions':editions}, - ) + ) def negotiate_content(request,work_id): if request.META.get('HTTP_ACCEPT', None): @@ -43,7 +46,7 @@ def negotiate_content(request,work_id): return HttpResponseRedirect(reverse('opds_acqusition',args=['all'])+'?work='+work_id) elif "text/xml" in request.META['HTTP_ACCEPT']: return HttpResponseRedirect(reverse('onix',args=['all'])+'?work='+work_id) - + return HttpResponseRedirect(reverse('work', kwargs={'work_id': work_id})) def featured_work(): @@ -51,17 +54,17 @@ def featured_work(): work = models.Work.objects.filter(featured__isnull=False).distinct().order_by('-featured')[0] except: #shouldn't occur except in tests - work = models.Work.objects.all()[0] + work = models.Work.objects.first() return work def widget(request, isbn): """ supply info for book panel. parameter is named isbn for historical reasons. can be isbn or work_id """ - + if isbn == 'featured': work = featured_work() - else : + else : if len(isbn)==10: isbn = regluit.core.isbn.convert_10_to_13(isbn) if len(isbn)==13: @@ -69,19 +72,19 @@ def widget(request, isbn): identifier = models.Identifier.objects.get(type = 'isbn', value = isbn ) work = identifier.work except models.Identifier.DoesNotExist: - return render(request, 'widget.html', - { 'work':None,}, + return render(request, 'widget.html', + { 'work':None,}, ) else: work= models.safe_get_work(isbn) - return render(request, 'widget.html', - {'work':work, }, + return render(request, 'widget.html', + {'work':work, }, ) def featured_cover(request): work = featured_work() tn = work.cover_image_thumbnail() - return HttpResponseRedirect(tn if tn else "/static/images/generic_cover_larger.png") + return HttpResponseRedirect(tn if tn else DEFAULT_COVER) def featured_url(request): work = featured_work() @@ -99,63 +102,63 @@ def load_yaml(request): try: work_id = load_from_yaml(repo_url) return HttpResponseRedirect(reverse('work', args=[work_id])) - except: + except: return HttpResponse('unsuccessful') - 
-@csrf_exempt + +@csrf_exempt def travisci_webhook(request): """ Respond to travis-ci webhooks from Project GITenberg repositories. If the webhook is successfully parsed, the metdata.yaml for the repository is loaded using load_from_yaml. https://docs.travis-ci.com/user/notifications/#Webhook-notification - + """ if request.method == "POST": - + try: - - data = json.loads(request.POST.get('payload')) + + data = json_module.loads(request.POST.get('payload')) # example of URL to feed to yaml loader: # https://github.com/GITenberg/Adventures-of-Huckleberry-Finn_76/raw/master/metadata.yaml - + if data['status_message'] == 'Passed' and data['type'] == 'push': - + # another way to get owner_name / name would be request.META.get('HTTP_TRAVIS_REPO_SLUG', '') repo_url = "https://github.com/{}/{}/raw/master/metadata.yaml".format(data['repository']['owner_name'], data['repository']['name']) - + work_id = load_from_yaml(repo_url) return HttpResponse('Successful. work_id: {}'.format(work_id)) - + except Exception as e: - return HttpResponseBadRequest('Unsuccessful. Exception: {}'.format(unicode(e))) - + return HttpResponseBadRequest('Unsuccessful. 
Exception: {}'.format(str(e))) + else: - + return HttpResponse('No action') - + else: return HttpResponse('No action') - - - + + + class ApiHelpView(TemplateView): template_name = "api_help.html" def get_context_data(self, **kwargs): context = super(ApiHelpView, self).get_context_data(**kwargs) - + # base_url passed in to allow us to write absolute URLs for this site base_url = self.request.build_absolute_uri("/")[:-1] context["base_url"] = base_url - + # if user is logged in, pass in the user's API key u = auth.get_user(self.request) - if u.is_authenticated(): + if u.is_authenticated: api_key = ApiKey.objects.filter(user=u)[0].key context['api_key'] = api_key - + # pass in a sample Campaign whose widget can be displayed campaigns = models.Campaign.objects.all() if len(campaigns): @@ -164,22 +167,22 @@ def get_context_data(self, **kwargs): context["campaign"] = campaigns[0] context["campaign_isbn"] = isbn - return context + return context class OPDSNavigationView(TemplateView): - json=False + json = False # https://stackoverflow.com/a/6867976: secret to how to change content-type - + def render_to_response(self, context, **response_kwargs): - if json: + if self.json: response_kwargs['content_type'] = "application/vnd.opds.navigation+json" else: - response_kwargs['content_type'] = "application/atom+xml;profile=opds-catalog;kind=navigation" + response_kwargs['content_type'] = opds.NAVIGATION return super(TemplateView, self).render_to_response(context, **response_kwargs) - + def get_context_data(self, **kwargs): context = super(OPDSNavigationView, self).get_context_data(**kwargs) - if json: + if self.json: context["feeds"] = opds_json.feeds() context["feed"] = opds_json.get_facet_facet('all') else: @@ -193,47 +196,63 @@ def get(self, request, *args, **kwargs): work = request.GET.get('work', None) if work: if self.json: - return HttpResponse(opds_json.opds_feed_for_work(work), + return StreamingHttpResponse(opds_json.opds_feed_for_work(work), 
content_type="application/opds-publication+json") else: - return HttpResponse(opds.opds_feed_for_work(work), - content_type="application/atom+xml;profile=opds-catalog;kind=acquisition") + return StreamingHttpResponse(opds.opds_feed_for_work(work), + content_type=opds.ACQUISITION) facet = kwargs.get('facet') page = request.GET.get('page', None) order_by = request.GET.get('order_by', 'newest') + + # robots occasionally mangle order_by + order_by = order_by if order_by in ORDER_BY_KEYS else 'newest' + try: page = int(page) except: page = None if self.json: facet_class = opds_json.get_facet_class(facet)() - return HttpResponse(facet_class.feed(page,order_by), - content_type="application/opds+json") + return StreamingHttpResponse(facet_class.feed(page,order_by), + content_type="application/opds+json; charset=utf-8") else: facet_class = opds.get_facet_class(facet)() - return HttpResponse(facet_class.feed(page,order_by), - content_type="application/atom+xml;profile=opds-catalog;kind=acquisition") - + return StreamingHttpResponse(facet_class.feed(page,order_by), + content_type=opds.ACQUISITION) class OnixView(View): - def get(self, request, *args, **kwargs): work = request.GET.get('work', None) + if work: try: - work=models.safe_get_work(work) + work = models.safe_get_work(work) except models.Work.DoesNotExist: - raise Http404 - return HttpResponse(onix.onix_feed_for_work(work), - content_type="text/xml") + raise Http404 + return HttpResponse(onix.onix_feed_for_work(work), content_type="text/xml") + facet = kwargs.get('facet', 'all') - if facet: - max = request.GET.get('max', 100) - try: - max = int(max) - except: - max = None - facet_class = opds.get_facet_class(facet)() - return HttpResponse(onix.onix_feed(facet_class, max), - content_type="text/xml") + + if not facet: + return HttpResponseBadRequest(content='No facet provided') + + max_records = request.GET.get('max', ANONYMOUS_MAX_RECORDS) + + try: + max_records = int(max_records) + except Exception: + max_records = 
ANONYMOUS_MAX_RECORDS + + max_records = max_records if request.user.is_authenticated else ANONYMOUS_MAX_RECORDS + + facet_class = opds.get_facet_class(facet)() + page = request.GET.get('page', None) + try: + page = int(page) + except: + page = None + + feed = onix.onix_feed(facet_class, max_records, page_number=page) + return StreamingHttpResponse(feed, content_type="text/xml") diff --git a/bisac/__init__.py b/bisac/__init__.py index 554feb6d8..3a7fe611f 100644 --- a/bisac/__init__.py +++ b/bisac/__init__.py @@ -1,5 +1,4 @@ # data from https://github.com/edsu/bisac - class Bisac(object): def __init__(self): @@ -16,7 +15,6 @@ def code(self, subject): return top return bisac.get(subject, {}).get('notation','') - bisac= { "Religion / Christian Life / Social Issues": { "related": [], @@ -23130,4 +23128,215 @@ def code(self, subject): "notation": "HIS042000", "alt_label": [] }, + "Political Science / Public Policy / Economic Policy": { + "related": [], + "pref_label": "Political Science / Public Policy / Economic Policy", + "notation": "POL024000", + "alt_label": [] + }, + "Biography & Autobiography / Science & Technology": { + "related": [], + "pref_label": "Biography & Autobiography / Science & Technology", + "notation": "BIO015000", + "alt_label": [] + }, + "History / Middle East / Iran": { + "related": [], + "pref_label": "History / Middle East / Iran", + "notation": "HIS026020", + "alt_label": [] + }, + "History / Europe / Spain & Portugal": { + "related": [], + "pref_label": "History / Europe / Spain & Portugal", + "notation": "HIS045000", + "alt_label": [] + }, + "History / African American": { + "related": [], + "pref_label": "History / African American", + "notation": "HIS056000", + "alt_label": [] + }, + "History / Women": { + "related": [], + "pref_label": "History / Women", + "notation": "HIS058000", + "alt_label": [] + }, + "History / Europe / Poland": { + "related": [], + "pref_label": "History / Europe / Poland", + "notation": "HIS060000", + "alt_label": 
[] + }, + "Language Arts & Disciplines / Literacy": { + "related": [], + "pref_label": "Language Arts & Disciplines / Literacy", + "notation": "LAN010000", + "alt_label": [] + }, + "Language Arts & Disciplines / Linguistics / Sociolinguistics": { + "related": [], + "pref_label": "Language Arts & Disciplines / Linguistics / Sociolinguistics", + "notation": "LAN009050", + "alt_label": [] + }, + "Language Arts & Disciplines / Library & Information Science / Digital & Online Resources": { + "related": [], + "pref_label": "Language Arts & Disciplines / Library & Information Science / Digital & Online Resources", + "notation": "LAN025060", + "alt_label": [] + }, + "Literary Criticism / European / Eastern": { + "related": [], + "pref_label": "Literary Criticism / European / Eastern", + "notation": "LIT004110", + "alt_label": [] + }, + "Literary Criticism / Comparative Literature": { + "related": [], + "pref_label": "Literary Criticism / Comparative Literature", + "notation": "LIT020000", + "alt_label": [] + }, + "Literary Criticism / Modern / General": { + "related": [], + "pref_label": "Literary Criticism / Modern / General", + "notation": "LIT024000", + "alt_label": [] + }, + "Literary Criticism / Modern / 16th Century": { + "related": [], + "pref_label": "Literary Criticism / Modern / 16th Century", + "notation": "LIT024010", + "alt_label": [] + }, + "Literary Criticism / Modern / 17th Century": { + "related": [], + "pref_label": "Literary Criticism / Modern / 17th Century", + "notation": "LIT024020", + "alt_label": [] + }, + "Literary Criticism / Modern / 18th Century": { + "related": [], + "pref_label": "Literary Criticism / Modern / 18th Century", + "notation": "LIT024030", + "alt_label": [] + }, + "Literary Criticism / Modern / 19th Century": { + "related": [], + "pref_label": "Literary Criticism / Modern / 19th Century", + "notation": "LIT024040", + "alt_label": [] + }, + "Literary Criticism / Modern / 20th Century": { + "related": [], + "pref_label": "Literary 
Criticism / Modern / 20th Century", + "notation": "LIT024050", + "alt_label": [] + }, + "Literary Criticism / Modern / 21st Century": { + "related": [], + "pref_label": "Literary Criticism / Modern / 21st Century", + "notation": "LIT024060", + "alt_label": [] + }, + "Political Science / Security (National & International)": { + "related": [], + "pref_label": "Political Science / Security (National & International)", + "notation": "POL012000", + "alt_label": [] + }, + "Political Science / Intergovernmental Organizations": { + "related": [], + "pref_label": "Political Science / Intergovernmental Organizations", + "notation": "POL048000", + "alt_label": [] + }, + "Political Science / Genocide & War Crimes": { + "related": [], + "pref_label": "Political Science / Genocide & War Crimes", + "notation": "POL061000", + "alt_label": [] + }, + "Political Science / Geopolitics": { + "related": [], + "pref_label": "Political Science / Geopolitics", + "notation": "POL062000", + "alt_label": [] + }, + "Political Science / Political Process / Media & Internet": { + "related": [], + "pref_label": "Political Science / Political Process / Media & Internet", + "notation": "POL065000", + "alt_label": [] + }, + "Political Science / Public Policy / Military Policy": { + "related": [], + "pref_label": "Political Science / Public Policy / Military Policy", + "notation": "POL069000", + "alt_label": [] + }, + "Psychology / Animal & Comparative Psychology": { + "related": [], + "pref_label": "Psychology / Animal & Comparative Psychology", + "notation": "PSY054000", + "alt_label": [] + }, + "Religion / Buddhism / General": { + "related": [], + "pref_label": "Religion / Buddhism / General", + "notation": "REL007000", + "alt_label": [] + }, + "Science / Environmental Science": { + "related": [], + "pref_label": "Science / Environmental Science", + "notation": "SCI026000", + "alt_label": [] + }, + "Science / Ethics": { + "related": [], + "pref_label": "Science / Ethics", + "notation": 
"SCI101000", + "alt_label": [] + }, + "Social Science / Sociology / Social Theory": { + "related": [], + "pref_label": "Social Science / Sociology / Social Theory", + "notation": "SOC026040", + "alt_label": [] + }, + "Social Science / Indigenous Studies": { + "related": [], + "pref_label": "Social Science / Indigenous Studies", + "notation": "SOC062000", + "alt_label": [] + }, + "Technology & Engineering / Electronics / Circuits / General": { + "related": [], + "pref_label": "Technology & Engineering / Electronics / Circuits / General", + "notation": "TEC008010", + "alt_label": [] + }, + "History / Modern / 19th Century": { + "related": [], + "pref_label": "History / Modern / 19th Century", + "notation": "HIS037060", + "alt_label": [] + }, + "History / Europe / Greece": { + "related": [], + "pref_label": "History / Europe / Greece", + "notation": "HIS042000", + "alt_label": [] + }, + "History / Social History": { + "related": [], + "pref_label": "History / Social History", + "notation": "HIS054000", + "alt_label": [] + }, + } \ No newline at end of file diff --git a/bisac/management/commands/load_bisac.py b/bisac/management/commands/load_bisac.py index 123e4e5ea..8ef420c1b 100644 --- a/bisac/management/commands/load_bisac.py +++ b/bisac/management/commands/load_bisac.py @@ -9,4 +9,4 @@ class Command(BaseCommand): def handle(self, **options): populate_bisac_headings() attach_dangling_branches() - print "bisac table is ready" + print("bisac table is ready") diff --git a/bisac/migrations/0001_initial.py b/bisac/migrations/0001_initial.py index 0baaf7b1c..bd0d51d7c 100644 --- a/bisac/migrations/0001_initial.py +++ b/bisac/migrations/0001_initial.py @@ -22,7 +22,7 @@ class Migration(migrations.Migration): ('rght', models.PositiveIntegerField(editable=False, db_index=True)), ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)), ('level', models.PositiveIntegerField(editable=False, db_index=True)), - ('parent', 
mptt.fields.TreeForeignKey(related_name='children', blank=True, to='bisac.BisacHeading', null=True)), + ('parent', mptt.fields.TreeForeignKey(on_delete=models.CASCADE, related_name='children', blank=True, to='bisac.BisacHeading', null=True)), ], options={ 'abstract': False, diff --git a/bisac/models.py b/bisac/models.py index 37c12ffd1..c379cec69 100644 --- a/bisac/models.py +++ b/bisac/models.py @@ -6,13 +6,24 @@ class BisacHeading(MPTTModel): full_label = models.CharField(max_length=100, unique=True) label = models.CharField(max_length=60, unique=False) notation = models.CharField(max_length=9, unique=False) - parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True) + parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children', db_index=True) class MPTTMeta: order_insertion_by = ['notation'] - def __unicode__(self): + def __str__(self): return self.full_label + +def interpret_notation(notation): + #translate a notation + if notation: + try: + bisac_heading = BisacHeading.objects.get(notation=notation) + return bisac_heading.full_label + except BisacHeading.DoesNotExist: + pass + return notation + def populate_bisac_headings(): for key in bisac.keys(): diff --git a/bisac/urls.py b/bisac/urls.py index cf368f861..bb5e55f98 100644 --- a/bisac/urls.py +++ b/bisac/urls.py @@ -1,4 +1,4 @@ -from django.conf.urls import patterns, url, include +from django.conf.urls import url from .views import tree urlpatterns = [ diff --git a/bookdata/sitemaps.txt b/bookdata/sitemaps.txt index b91f2814e..5295a956c 100644 --- a/bookdata/sitemaps.txt +++ b/bookdata/sitemaps.txt @@ -1,9 +1,19 @@ -https://www.ubiquitypress.com/sitemap.xml -https://www.kriterium.se/sitemap.xml +https://aperio.press/sitemap.xml +https://hup.fi/sitemap.xml +https://iitikship.iiti.ac.in/sitemap.xml https://oa.finlit.fi/sitemap.xml -https://www.humanities-map.net/sitemap.xml https://oa.psupress.org/sitemap.xml 
+https://press.lse.ac.uk/sitemap.xml +https://press.sjms.nu/sitemap.xml +https://publishing.vt.edu/sitemap.xml +https://universitypress.whiterose.ac.uk/sitemap.xml +https://utsepress.lib.uts.edu.au/sitemap.xml +https://www.humanities-map.net/sitemap.xml +https://www.kriterium.se/sitemap.xml https://www.larcommons.net/sitemap.xml -https://www.uwestminsterpress.co.uk/sitemap.xml +https://www.luminosoa.org/sitemap.xml +https://www.mwv-open.de/sitemap.xml https://www.stockholmuniversitypress.se/sitemap.xml -https://www.luminosoa.org/sitemap.xml \ No newline at end of file +https://www.ubiquitypress.com/sitemap.xml +https://www.uwestminsterpress.co.uk/sitemap.xml +https://www.winchesteruniversitypress.org/sitemap.xml diff --git a/booxtream/__init__.py b/booxtream/__init__.py index 4c3d9cca7..60658ed05 100644 --- a/booxtream/__init__.py +++ b/booxtream/__init__.py @@ -1,6 +1,6 @@ import random from functools import partial -from urllib import quote +from urllib.parse import quote from xml.etree import ElementTree import requests @@ -64,7 +64,6 @@ def platform(self, epubfile=None, epub=True, kf8mobi=False, **kwargs): # fake it, so you can test other functions without hitting booxtream boox = Boox.objects.create( download_link_epub='https://github.com/eshellman/42_ebook/blob/master/download/42.epub?raw=true&extra=download.booxtream.com/', - download_link_mobi='https://github.com/eshellman/42_ebook/blob/master/download/42.mobi?raw=true', referenceid= kwargs.get('referenceid', '42'), downloads_remaining=kwargs.get('downloadlimit', 10), expirydays=kwargs.get('expirydays', 30), @@ -81,12 +80,8 @@ def platform(self, epubfile=None, epub=True, kf8mobi=False, **kwargs): download_link_epub = doc.find('.//DownloadLink[@type="epub"]') if download_link_epub is not None: download_link_epub = download_link_epub.text - download_link_mobi = doc.find('.//DownloadLink[@type="mobi"]') - if download_link_mobi is not None: - download_link_mobi = download_link_mobi.text boox = 
Boox.objects.create( download_link_epub=download_link_epub, - download_link_mobi=download_link_mobi, referenceid=kwargs.get('referenceid'), downloads_remaining=kwargs.get('downloadlimit'), expirydays=kwargs.get('expirydays'), diff --git a/booxtream/migrations/0002_remove_boox_download_link_mobi.py b/booxtream/migrations/0002_remove_boox_download_link_mobi.py new file mode 100644 index 000000000..668870ff7 --- /dev/null +++ b/booxtream/migrations/0002_remove_boox_download_link_mobi.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2022-07-28 06:16 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('booxtream', '0001_initial'), + ] + + operations = [ + migrations.RemoveField( + model_name='boox', + name='download_link_mobi', + ), + ] diff --git a/booxtream/models.py b/booxtream/models.py index cbf2e6066..09ccbea2b 100644 --- a/booxtream/models.py +++ b/booxtream/models.py @@ -7,7 +7,6 @@ class Boox(models.Model): keeps a record of a file that's been watermarked """ download_link_epub = models.URLField(null=True) - download_link_mobi = models.URLField(null=True) referenceid = models.CharField(max_length=32) downloads_remaining = models.PositiveSmallIntegerField(default=0) expirydays = models.PositiveSmallIntegerField() @@ -20,7 +19,5 @@ def expired(self): def download_link(self, format): if format == 'epub': return self.download_link_epub - elif format == 'mobi': - return self.download_link_mobi return None diff --git a/booxtream/tests.py b/booxtream/tests.py index c7cf266ff..6f27e65d1 100644 --- a/booxtream/tests.py +++ b/booxtream/tests.py @@ -1,19 +1,19 @@ import unittest import time -import urllib2 +from urllib.request import urlopen from tempfile import NamedTemporaryFile -from StringIO import StringIO +from io import BytesIO from django.conf import settings class TestBooXtream(unittest.TestCase): def setUp(self): # get a small epub test file 
as a file-like object self.epub2file = NamedTemporaryFile(delete=False) - test_file_content = urllib2.urlopen('http://www.hxa.name/articles/content/EpubGuide-hxa7241.epub') + test_file_content = urlopen('https://www.hxa.name/articles/content/EpubGuide-hxa7241.epub') self.epub2file.write(test_file_content.read()) self.epub2file.seek(0) self.textfile = NamedTemporaryFile(delete=False) - self.textfile.write("bad text file") + self.textfile.write(b'bad text file') self.textfile.seek(0) @@ -67,7 +67,7 @@ def test_booxtream_good(self): # make sure it works with an in-memory file self.epub2file.seek(0) - in_mem_epub = StringIO() + in_mem_epub = BytesIO() in_mem_epub.write(self.epub2file.read()) in_mem_epub.seek(0) boox2 = inst.platform(epubfile=in_mem_epub, **params) diff --git a/celery_module.py b/celery_module.py new file mode 100644 index 000000000..20160ae1c --- /dev/null +++ b/celery_module.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, unicode_literals + +import os + +from celery import Celery + +# set the default Django settings module for the 'celery' program. +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'regluit.settings') + +app = Celery('regluit') + +# Using a string here means the worker doesn't have to serialize +# the configuration object to child processes. + +app.config_from_object('django.conf:settings') + +# Load task modules from all registered Django app configs. 
+app.autodiscover_tasks() + + diff --git a/context_processors.py b/context_processors.py index 02db6a844..3c172a98d 100644 --- a/context_processors.py +++ b/context_processors.py @@ -1,12 +1,20 @@ def is_preview(request): - from django.conf import settings - return {'jquery_home': settings.JQUERY_HOME, 'jquery_ui_home': settings.JQUERY_UI_HOME, 'is_preview': settings.IS_PREVIEW, 'show_google_analytics': settings.SHOW_GOOGLE_ANALYTICS} - + from django.conf import settings + return { + 'jquery_home': settings.JQUERY_HOME, + 'jquery_ui_home': settings.JQUERY_UI_HOME, + 'jquery_ui_theme': settings.JQUERY_UI_THEME, + 'is_preview': settings.IS_PREVIEW, + 'show_google_analytics': settings.SHOW_GOOGLE_ANALYTICS + } + def count_unseen(request): - - if request.user.is_anonymous(): - count = 0 - else: - from notification.models import Notice - count = Notice.objects.unseen_count_for(request.user) - return {'unseen_count': count} \ No newline at end of file + try: + if request.user.is_anonymous: + count = 0 + else: + from notification.models import Notice + count = Notice.objects.unseen_count_for(request.user) + except AttributeError: + count = 0 + return {'unseen_count': count} \ No newline at end of file diff --git a/core/admin.py b/core/admin.py index 73706deb9..e365fc706 100644 --- a/core/admin.py +++ b/core/admin.py @@ -3,7 +3,8 @@ # from django import forms from django.contrib.admin import ModelAdmin, register -from django.core.urlresolvers import reverse +from django.urls import reverse +from django.utils.safestring import mark_safe from selectable.forms import ( AutoCompleteSelectWidget, @@ -196,7 +197,7 @@ class Meta(object): class EbookAdmin(ModelAdmin): form = EbookAdminForm search_fields = ('edition__title', '^url') # search by provider using leading url - list_display = ('__unicode__', 'created', 'user', 'edition') + list_display = ('__str__', 'created', 'user', 'edition') date_hierarchy = 'created' ordering = ('edition__title',) readonly_fields = ('user', 
'filesize', 'download_count') @@ -226,20 +227,17 @@ class EbookFileAdmin(ModelAdmin): date_hierarchy = 'created' ordering = ('edition__work',) fields = ('file', 'format', 'edition', 'edition_link', 'ebook', 'ebook_link', 'source') - readonly_fields = ('file', 'edition_link', 'ebook_link',) + readonly_fields = ('file', 'edition_link', 'ebook_link', 'source') def edition_link(self, obj): if obj.edition: link = reverse("admin:core_edition_change", args=[obj.edition_id]) - return u'%s' % (link, obj.edition) - return u'' + return mark_safe('%s' % (link, obj.edition)) + return '' def ebook_link(self, obj): if obj.ebook: link = reverse("admin:core_ebook_change", args=[obj.ebook_id]) - return u'%s' % (link, obj.ebook) - return u'' - edition_link.allow_tags = True - ebook_link.allow_tags = True - + return mark_safe('%s' % (link, obj.ebook)) + return '' @register(models.Wishlist) class WishlistAdmin(ModelAdmin): @@ -257,8 +255,7 @@ class GiftAdmin(ModelAdmin): search_fields = ('giver__username', 'to') readonly_fields = ('giver', 'acq',) def acq_admin_link(self, gift): - return "%s" % (gift.acq_id, gift.acq) - acq_admin_link.allow_tags = True + return mark_safe("%s" % (gift.acq_id, gift.acq)) @register(models.CeleryTask) class CeleryTaskAdmin(ModelAdmin): diff --git a/core/bookloader.py b/core/bookloader.py index a9fed0c2f..2baabe052 100755 --- a/core/bookloader.py +++ b/core/bookloader.py @@ -6,7 +6,7 @@ import re from datetime import timedelta from xml.etree import ElementTree -from urlparse import (urljoin, urlparse) +from urllib.parse import (urljoin, urlparse) import requests @@ -14,29 +14,31 @@ # django imports from django.conf import settings -from django.core.files.base import ContentFile from django.core.files.storage import default_storage from django.db import IntegrityError +from django.db.models import Sum from django.forms import ValidationError +from django.utils.timezone import now from django_comments.models import Comment +import github3 from github3 import 
(login, GitHub) from github3.repos.release import Release -from django.utils.timezone import now from gitenberg.metadata.pandata import Pandata # regluit imports import regluit import regluit.core.isbn -from regluit.core.validation import test_file from regluit.marc.models import inverse_marc_rels +from regluit.utils.lang import lang_to_language_code from . import cc from . import models -from .parameters import WORK_IDENTIFIERS +from .parameters import WORK_IDENTIFIERS, DOWNLOADABLE from .validation import identifier_cleaner, unreverse_name +from .models import loader logger = logging.getLogger(__name__) request_log = logging.getLogger("requests") @@ -58,10 +60,10 @@ def add_by_oclc_from_google(oclc): url = "https://www.googleapis.com/books/v1/volumes" try: results = _get_json(url, {"q": '"OCLC%s"' % oclc}) - except LookupFailure, e: + except LookupFailure as e: logger.exception(u"lookup failure for %s", oclc) return None - if not results.has_key('items') or not results['items']: + if not 'items' in results or not results['items']: logger.warn(u"no google hits for %s", oclc) return None @@ -69,9 +71,9 @@ def add_by_oclc_from_google(oclc): e = add_by_googlebooks_id(results['items'][0]['id'], results=results['items'][0]) models.Identifier(type='oclc', value=oclc, edition=e, work=e.work).save() return e - except LookupFailure, e: + except LookupFailure as e: logger.exception(u"failed to add edition for %s", oclc) - except IntegrityError, e: + except IntegrityError as e: logger.exception(u"google books data for %s didn't fit our db", oclc) return None @@ -135,7 +137,7 @@ def get_google_isbn_results(isbn): except LookupFailure: logger.exception(u"lookup failure for %s", isbn) return None - if not results.has_key('items') or not results['items']: + if not 'items' in results or not results['items']: logger.warn(u"no google hits for %s", isbn) return None return results @@ -187,7 +189,7 @@ def update_edition(edition): item = results['items'][0] googlebooks_id = item['id'] 
d = item['volumeInfo'] - if d.has_key('title'): + if 'title' in d: title = d['title'] else: title = '' @@ -197,7 +199,7 @@ def update_edition(edition): title = edition.work.title # check for language change - language = d['language'] + language = fix_lang(d['language']) # allow variants in main language (e.g., 'zh-tw') if len(language) > 5: language = language[0:5] @@ -206,7 +208,7 @@ def update_edition(edition): # attach edition to the if edition.work.language != language: logger.info(u"reconnecting %s since it is %s instead of %s", - googlebooks_id, language, edition.work.language) + googlebooks_id, language, edition.work.language) old_work = edition.work new_work = models.Work(title=title, language=language) @@ -217,7 +219,7 @@ def update_edition(edition): logger.info(u"moving identifier %s", identifier.value) identifier.work = new_work identifier.save() - if old_work and old_work.editions.count() == 0: + if old_work and not old_work.editions.exists(): #a dangling work; make sure nothing else is attached! merge_works(new_work, old_work) @@ -250,9 +252,10 @@ def get_isbn_item(items, isbn): for ident in industryIdentifiers: if ident['identifier'] == isbn: return item - else: - return None # no items - return item + # no isbn item maybe "other" + for item in items: + return item + def add_by_isbn_from_google(isbn, work=None): """add a book to the UnglueIt database from google based on ISBN. 
The work parameter @@ -285,9 +288,9 @@ def add_by_isbn_from_google(isbn, work=None): results=item, isbn=isbn ) - except LookupFailure, e: + except LookupFailure as e: logger.exception(u"failed to add edition for %s", isbn) - except IntegrityError, e: + except IntegrityError as e: logger.exception(u"google books data for %s didn't fit our db", isbn) return None return None @@ -306,6 +309,15 @@ def get_edition_by_id(type, value): except models.Identifier.DoesNotExist: return None +def fix_lang(language): + if len(language) > 5: + language = language[0:5] + if language == 'un': + # 5/28/21 language coding is broken in google books + # hope they fix it + language = 'xx' + return language + def add_by_googlebooks_id(googlebooks_id, work=None, results=None, isbn=None): """add a book to the UnglueIt database based on the GoogleBooks ID. The @@ -344,12 +356,12 @@ def add_by_googlebooks_id(googlebooks_id, work=None, results=None, isbn=None): item = _get_json(url) d = item['volumeInfo'] - if d.has_key('title'): + if 'title' in d: title = d['title'] else: title = '' if not title: - # need a title to make an edition record; some crap records in GB. + # need a title to make an edition record; some crap records in GB. 
# use title from parent if available if work: title = work.title @@ -357,10 +369,7 @@ def add_by_googlebooks_id(googlebooks_id, work=None, results=None, isbn=None): return None # don't add the edition to a work with a different language - # https://www.pivotaltracker.com/story/show/17234433 - language = d['language'] - if len(language) > 5: - language = language[0:5] + language = fix_lang(d['language']) if work and work.language != language: logger.info(u"not connecting %s since it is %s instead of %s", googlebooks_id, language, work.language) @@ -488,13 +497,13 @@ def add_related(isbn): logger.debug(u"merge_works path 1 %s %s", work.id, related_edition.work_id) work = merge_works(work, related_edition.work) else: - if other_editions.has_key(related_language): + if related_language in other_editions: other_editions[related_language].append(related_edition) else: other_editions[related_language] = [related_edition] # group the other language editions together - for lang_group in other_editions.itervalues(): + for lang_group in other_editions.values(): logger.debug(u"lang_group (ed, work): %s", [(ed.id, ed.work_id) for ed in lang_group]) if len(lang_group) > 1: lang_edition = lang_group[0] @@ -518,7 +527,7 @@ def thingisbn(isbn): which come back as isbn_13') """ logger.info(u"looking up %s at ThingISBN", isbn) - url = "https://www.librarything.com/api/thingISBN/%s" % isbn + url = f"https://www.librarything.com/api/{settings.LIBRARYTHING_KEY}/thingISBN/{isbn}" xml = requests.get(url, headers={"User-Agent": settings.USER_AGENT}).content try: doc = ElementTree.fromstring(xml) @@ -540,7 +549,7 @@ def merge_works(w1, w2, user=None): #don't merge if the works are related. 
if w2 in w1.works_related_to.all() or w1 in w2.works_related_to.all(): return w1 - + # check if one of the works is a series with parts (that have their own isbn) if w1.works_related_from.filter(relation='part'): models.WorkRelation.objects.get_or_create(to_work=w2, from_work=w1, relation='part') @@ -548,14 +557,20 @@ def merge_works(w1, w2, user=None): if w2.works_related_from.filter(relation='part'): models.WorkRelation.objects.get_or_create(to_work=w1, from_work=w2, relation='part') return w1 - - + if w1.editions.count() > 3 and w2.editions.count() > 3 and not user: + # avoid big merges + return w1 + if w2.selected_edition is not None and w1.selected_edition is None: #the merge should be reversed temp = w1 w1 = w2 w2 = temp - models.WasWork(was=w2.pk, work=w1, user=user).save() + try: + models.WasWork(was=w2.pk, work=w1, user=user).save() + except IntegrityError: + # already a 'was' entry for w2; somehow it was never deleted + pass for ww in models.WasWork.objects.filter(work=w2): ww.work = w1 ww.save() @@ -600,9 +615,6 @@ def merge_works(w1, w2, user=None): for hold in w2.holds.all(): hold.work = w1 hold.save() - for landing in w2.landings.all(): - landing.object_id = w1.id - landing.save() for subject in w2.subjects.all(): if subject not in w1.subjects.all(): w1.subjects.add(subject) @@ -615,21 +627,35 @@ def merge_works(w1, w2, user=None): w2.delete(cascade=False) return w1 -def detach_edition(e): +def detach_editions(eds): """ - will detach edition from its work, creating a new stub work. if remerge=true, will see if - there's another work to attach to + will detach edition from its work, creating a new stub work. 
""" + if not len(eds): + return + e = eds[0] + from_work = e.work logger.info(u"splitting edition %s from %s", e, e.work) w = models.Work(title=e.title, language=e.work.language) w.save() + for e in eds: + for identifier in e.identifiers.all(): + identifier.work = w + identifier.save() - for identifier in e.identifiers.all(): - identifier.work = w - identifier.save() + e.work = w + e.save() + + models.WorkRelation.objects.get_or_create( + to_work=w, + from_work=from_work, + relation='unspecified', + ) + + frees = models.Work.objects.annotate(free=Sum('editions__ebooks__active')).filter(free__gt=0) + w.is_free = frees.exists() + w.save() - e.work = w - e.save() SPAM_STRINGS = ["GeneralBooksClub.com", "AkashaPublishing.Com"] def despam_description(description): @@ -644,12 +670,14 @@ def despam_description(description): pieces = description.split("a million books for free.") if len(pieces) > 1: return pieces[1] - return description + return description.replace('\r\n', '\n') def add_openlibrary(work, hard_refresh=False): + if not settings.USE_OPENLIBRARY and not settings.DEBUG: + return if (not hard_refresh) and work.openlibrary_lookup is not None: # don't hit OL if we've visited in the past month or so - if now()- work.openlibrary_lookup < timedelta(days=30): + if now()- work.openlibrary_lookup < timedelta(days=90): return work.openlibrary_lookup = now() work.save() @@ -664,7 +692,7 @@ def add_openlibrary(work, hard_refresh=False): url = "https://openlibrary.org/api/books" params = {"format": "json", "jscmd": "details"} subjects = [] - for edition in work.editions.all(): + for edition in work.editions.all()[:10]: isbn_key = "ISBN:%s" % edition.isbn_13 params['bibkeys'] = isbn_key try: @@ -672,9 +700,9 @@ def add_openlibrary(work, hard_refresh=False): except LookupFailure: logger.exception(u"OL lookup failed for %s", isbn_key) e = {} - if e.has_key(isbn_key): - if e[isbn_key].has_key('details'): - if e[isbn_key]['details'].has_key('oclc_numbers'): + if isbn_key in e: + 
if 'details' in e[isbn_key]: + if 'oclc_numbers' in e[isbn_key]['details']: for oclcnum in e[isbn_key]['details']['oclc_numbers']: models.Identifier.get_or_add( type='oclc', @@ -682,42 +710,36 @@ def add_openlibrary(work, hard_refresh=False): work=work, edition=edition ) - if e[isbn_key]['details'].has_key('identifiers'): + if 'identifiers' in e[isbn_key]['details']: ids = e[isbn_key]['details']['identifiers'] - if ids.has_key('goodreads'): - models.Identifier.get_or_add( - type='gdrd', - value=ids['goodreads'][0], - work=work, edition=edition - ) - if ids.has_key('librarything'): + if 'librarything' in ids: models.Identifier.get_or_add( type='ltwk', value=ids['librarything'][0], work=work ) - if ids.has_key('google'): + if 'google' in ids: models.Identifier.get_or_add( type='goog', value=ids['google'][0], work=work ) - if ids.has_key('project_gutenberg'): + if 'project_gutenberg' in ids: models.Identifier.get_or_add( type='gute', value=ids['project_gutenberg'][0], work=work ) - if e[isbn_key]['details'].has_key('works'): + if 'works' in e[isbn_key]['details']: work_key = e[isbn_key]['details']['works'].pop(0)['key'] logger.info(u"got openlibrary work %s for isbn %s", work_key, isbn_key) models.Identifier.get_or_add(type='olwk', value=work_key, work=work) try: w = _get_json("https://openlibrary.org" + work_key, type='ol') - if w.has_key('description'): + if 'description' in w: description = w['description'] if isinstance(description, dict): - if description.has_key('value'): + if 'value' in description: description = description['value'] description = despam_description(description) if not work.description or \ @@ -725,7 +747,7 @@ def add_openlibrary(work, hard_refresh=False): len(description) > len(work.description): work.description = description work.save() - if w.has_key('subjects') and len(w['subjects']) > len(subjects): + if 'subjects' in w and len(w['subjects']) > len(subjects): subjects = w['subjects'] except LookupFailure: logger.exception(u"OL lookup 
failed for %s", work_key) @@ -749,7 +771,10 @@ def _get_json(url, params={}, type='gb'): if type == 'gb': params['key'] = settings.GOOGLE_BOOKS_API_KEY params['country'] = 'us' - response = requests.get(url, params=params, headers=headers) + try: + response = requests.get(url, params=params, headers=headers) + except requests.exceptions.ConnectionError: + raise LookupFailure("GET failed: url=%s and params=%s" % (url, params)) if response.status_code == 200: return json.loads(response.content) else: @@ -840,7 +865,6 @@ def load_from_yaml(yaml_url, test_mode=False): return edition.work_id if edition else None def edition_for_ident(id_type, id_value): - #print 'returning edition for {}: {}'.format(id_type, id_value) for ident in models.Identifier.objects.filter(type=id_type, value=id_value): return ident.edition if ident.edition else ident.work.editions[0] @@ -863,22 +887,6 @@ def edition_for_etype(etype, metadata, default=None): for key in metadata.edition_identifiers.keys(): return edition_for_ident(key, metadata.identifiers[key]) -def load_ebookfile(url, etype): - ''' - return a ContentFile if a new ebook has been loaded - ''' - ebfs = models.EbookFile.objects.filter(source=url) - if ebfs: - return None - try: - r = requests.get(url) - contentfile = ContentFile(r.content) - test_file(contentfile, etype) - return contentfile - except IOError, e: - logger.error(u'could not open {}'.format(url)) - except ValidationError, e: - logger.error(u'downloaded {} was not a valid {}'.format(url, etype)) class BasePandataLoader(object): def __init__(self, url): @@ -909,10 +917,9 @@ def load_from_pandata(self, metadata, work=None): if work and id.work and id.work_id is not work.id: # dangerous! 
merge newer into older if work.id < id.work_id: - merge_works(work, id.work) + work = merge_works(work, id.work) else: - merge_works(id.work, work) - work = id.work + work = merge_works(id.work, work) else: work = id.work if id.edition and not edition: @@ -923,7 +930,11 @@ def load_from_pandata(self, metadata, work=None): new_ids.append((identifier, id_code, value)) if not work: - work = models.Work.objects.create(title=metadata.title, language=metadata.language) + if metadata.title: + language = lang_to_language_code(metadata.language) + work = models.Work.objects.create(title=metadata.title, language=language if language else 'xx') + else: + return None if not edition: if metadata.edition_note: (note, created) = models.EditionNote.objects.get_or_create(note=metadata.edition_note) @@ -949,11 +960,13 @@ def load_from_pandata(self, metadata, work=None): #be careful about overwriting the work description if metadata.description and len(metadata.description) > len(work.description): + if isinstance(metadata.description, list): + metadata.description = '\n'.join(metadata.description) # don't over-write reasonably long descriptions if len(work.description) < 500: - work.description = metadata.description + work.description = metadata.description.replace('\r\n', '\n') - if metadata.creator and not edition.authors.count(): + if metadata.creator and not edition.authors.exists(): edition.authors.clear() for key in metadata.creator.keys(): creators = metadata.creator[key] @@ -966,7 +979,7 @@ def load_from_pandata(self, metadata, work=None): for yaml_subject in metadata.subjects: #always add yaml subjects (don't clear) if isinstance(yaml_subject, tuple): (authority, heading) = yaml_subject - elif isinstance(yaml_subject, str) or isinstance(yaml_subject, unicode) : + elif isinstance(yaml_subject, str) or isinstance(yaml_subject, str): (authority, heading) = ('', yaml_subject) else: continue @@ -986,16 +999,16 @@ def load_from_pandata(self, metadata, work=None): def 
load_ebooks(self, metadata, edition, test_mode=False, user=None): default_edition = edition - for key in ['epub', 'pdf', 'mobi']: + license = cc.license_from_cc_url(metadata.rights_url) + for key in DOWNLOADABLE: url = metadata.metadata.get('download_url_{}'.format(key), None) if url: edition = edition_for_etype(key, metadata, default=default_edition) if edition: - contentfile = load_ebookfile(url, key) - if contentfile: + contentfile, fmt = loader.load_ebookfile(url, key) + if contentfile and fmt == key: contentfile_name = '/loaded/ebook_{}.{}'.format(edition.id, key) path = default_storage.save(contentfile_name, contentfile) - license = cc.license_from_cc_url(metadata.rights_url) ebf = models.EbookFile.objects.create( format=key, edition=edition, @@ -1064,26 +1077,10 @@ def git_download_from_yaml_url(yaml_url, version, edition_name='book', format_=' ''' if yaml_url.endswith('raw/master/metadata.yaml'): repo_url = yaml_url[0:-24] - #print (repo_url,version,edition_name) ebook_url = repo_url + 'releases/download/' + version + '/' + edition_name + '.' + format_ return ebook_url -def release_from_tag(repo, tag_name): - """Get a release by tag name. 
- release_from_tag() returns a release with specified tag -    while release() returns a release with specified release id -    :param str tag_name: (required) name of tag -    :returns: :class:`Release ` -    """ -    # release_from_tag adapted from -    # https://github.com/sigmavirus24/github3.py/blob/38de787e465bffc63da73d23dc51f50d86dc903d/github3/repos/repo.py#L1781-L1793 - -    url = repo._build_url('releases', 'tags', tag_name, -                          base_url=repo._api) -    json_obj = repo._json(repo._get(url), 200) -    return Release(json_obj, repo) if json_obj else None - def ebooks_in_github_release(repo_owner, repo_name, tag, token=None): """ returns a list of (book_type, book_name) for a given GitHub release (specified by @@ -1101,24 +1098,29 @@ gh = GitHub() repo = gh.repository(repo_owner, repo_name) -    release = release_from_tag(repo, tag) +    try: +        release = repo.release_from_tag(tag) +        return [(EBOOK_FORMATS.get(asset.content_type), asset.name) +                for asset in release.assets() +                if EBOOK_FORMATS.get(asset.content_type) is not None] +    except github3.exceptions.NotFoundError: +        logger.error('No releases available for %s/%s', repo_owner, repo_name) +        return [] - return [(EBOOK_FORMATS.get(asset.content_type), asset.name) - for asset in release.iter_assets() - if EBOOK_FORMATS.get(asset.content_type) is not None] def add_from_bookdatas(bookdatas): ''' bookdatas are iterators of scrapers ''' editions = [] for bookdata in bookdatas: edition = work = None - loader = BasePandataLoader(bookdata.base) - pandata = Pandata() - pandata.metadata = bookdata.metadata - for metadata in pandata.get_edition_list(): - edition = loader.load_from_pandata(metadata, work) - work = edition.work - loader.load_ebooks(pandata, edition) - if edition: - editions.append(edition) + if bookdata and bookdata.metadata: + loader = BasePandataLoader(bookdata.base) + pandata = Pandata() + pandata.metadata = bookdata.metadata + for metadata in 
pandata.get_edition_list(): + edition = loader.load_from_pandata(metadata, work) + work = edition.work + loader.load_ebooks(pandata, edition) + if edition: + editions.append(edition) return editions diff --git a/core/cc.py b/core/cc.py index 333096e29..1429b3e7a 100644 --- a/core/cc.py +++ b/core/cc.py @@ -8,12 +8,12 @@ import re INFO_CC = ( - ('CC BY-NC-ND', 'by-nc-nd', 'Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)', 'https://creativecommons.org/licenses/by-nc-nd/3.0/', 'Creative Commons Attribution-NonCommercial-NoDerivs'), + ('CC BY-NC-ND', 'by-nc-nd', 'Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)', 'https://creativecommons.org/licenses/by-nc-nd/3.0/', 'Creative Commons Attribution-NonCommercial-NoDerivs'), ('CC BY-NC-SA', 'by-nc-sa', 'Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported (CC BY-NC-SA 3.0)', 'https://creativecommons.org/licenses/by-nc-sa/3.0/', 'Creative Commons Attribution-NonCommercial-ShareAlike'), ('CC BY-NC', 'by-nc', 'Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0)', 'https://creativecommons.org/licenses/by-nc/3.0/', 'Creative Commons Attribution-NonCommercial'), - ('CC BY-ND', 'by-nd', 'Creative Commons Attribution-NoDerivs 3.0 Unported (CC BY-ND 3.0)', 'https://creativecommons.org/licenses/by-nd/3.0/','Creative Commons Attribution-NoDerivs'), + ('CC BY-ND', 'by-nd', 'Creative Commons Attribution-NoDerivs 3.0 Unported (CC BY-ND 3.0)', 'https://creativecommons.org/licenses/by-nd/3.0/', 'Creative Commons Attribution-NoDerivs'), ('CC BY-SA', 'by-sa', 'Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0)', 'https://creativecommons.org/licenses/by-sa/3.0/', 'Creative Commons Attribution-ShareAlike'), - ('CC BY', 'by', 'Creative Commons Attribution 3.0 Unported (CC BY 3.0)', 'https://creativecommons.org/licenses/by/3.0/', 'Creative Commons Attribution'), + ('CC BY', 'by', 'Creative Commons Attribution 3.0 Unported (CC 
BY 3.0)', 'https://creativecommons.org/licenses/by/3.0/', 'Creative Commons Attribution'), ('CC0', 'cc0', 'No Rights Reserved (CC0)', 'https://creativecommons.org/about/cc0', 'No Rights Reserved (CC0)'), ) INFO_FREE = INFO_CC + ( @@ -28,20 +28,20 @@ # CCHOICES, CCGRANTS, and FORMATS are all used in places that expect tuples # CONTENT_TYPES will be easiest to manipulate in ungluify_record as a dict -CCCHOICES = tuple([(item[0],item[2]) for item in INFO_CC]) -FREECHOICES = tuple([(item[0],item[2]) for item in INFO_FREE]) - -CHOICES = tuple([(item[0],item[4]) for item in INFO_ALL]) +CCCHOICES = tuple([(item[0], item[2]) for item in INFO_CC]) +FREECHOICES = tuple([(item[0], item[2]) for item in INFO_FREE]) -CCGRANTS = tuple([(item[0],item[3]) for item in INFO_CC]) +CHOICES = tuple([(item[0], item[4]) for item in INFO_ALL]) -GRANTS = tuple([(item[0],item[3]) for item in INFO_ALL]) +CCGRANTS = tuple([(item[0], item[3]) for item in INFO_CC]) -LICENSE_LIST = [item[0] for item in INFO_CC] -LICENSE_LIST_ALL = [item[0] for item in INFO_ALL] +GRANTS = tuple([(item[0], item[3]) for item in INFO_ALL]) + +LICENSE_LIST = [item[0] for item in INFO_CC] +LICENSE_LIST_ALL = [item[0] for item in INFO_ALL] LICENSE_NAMES_ALL = [item[2] for item in INFO_ALL] LICENSE_URLS_ALL = [item[3] for item in INFO_ALL] -FACET_LIST = [item[1] for item in INFO_ALL] +FACET_LIST = [item[1] for item in INFO_ALL] RIGHTS_ALIAS = { "Public domain in the USA.":"PD-US", @@ -53,8 +53,7 @@ def url(license): license = RIGHTS_ALIAS.get(license, license) if license in LICENSE_LIST_ALL: return INFO_ALL[LICENSE_LIST_ALL.index(license)][3] - else: - return '' + return '' @staticmethod def badge(license): @@ -80,40 +79,38 @@ def badge(license): return '/static/images/lal.png' elif license == 'OSI': return '/static/images/opensource.png' - else: - return '' + return '' def description(license): - if license == 'PD-US': - return 'Use of this material is not restricted by copyright in the US.' 
- elif license == 'CC0': - return 'The copyright owner has dedicated the material to the public domain by waiving all of his or her rights to the work worldwide under copyright law, including all related and neighboring rights, to the extent allowed by law. You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission.' - elif license == 'CC BY': - return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.' - elif license == 'CC BY-NC-ND': - return 'You are free to: copy and redistribute the material in any medium or format; under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.; you may not use the material for commercial purposes; if you remix, transform, or build upon the material, you may not distribute the modified material.' - elif license == 'CC BY-NC-SA': - return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. You may not use the material for commercial purposes. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.' 
- elif license == 'CC BY-NC': - return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. You may not use the material for commercial purposes.' - elif license == 'CC BY-SA': - return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.' - elif license == 'CC BY-ND': - return 'You are free to: copy and redistribute the material in any medium or format; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you may not distribute the modified material.' - elif license == 'GFDL': - return 'The purpose of this License is to make a manual, textbook, or other functional and useful document "free" in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. 
Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others.' - elif license == 'LAL': - return 'Avec la Licence Art Libre, l\'autorisation est donnée de copier, de diffuser et de transformer librement les œuvres dans le respect des droits de l\'auteur.' - elif license == 'OSI': - return 'Open source licenses are licenses that comply with the Open Source Definition — in brief, they allow software to be freely used, modified, and shared. To be approved by the Open Source Initiative (also known as the OSI), a license must go through the Open Source Initiative\'s license review process.' - else: - return '' + if license == 'PD-US': + return 'Use of this material is not restricted by copyright in the US.' + elif license == 'CC0': + return 'The copyright owner has dedicated the material to the public domain by waiving all of his or her rights to the work worldwide under copyright law, including all related and neighboring rights, to the extent allowed by law. You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission.' + elif license == 'CC BY': + return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.' + elif license == 'CC BY-NC-ND': + return 'You are free to: copy and redistribute the material in any medium or format; under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. 
You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.; you may not use the material for commercial purposes; if you remix, transform, or build upon the material, you may not distribute the modified material.' + elif license == 'CC BY-NC-SA': + return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. You may not use the material for commercial purposes. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.' + elif license == 'CC BY-NC': + return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. You may not use the material for commercial purposes.' + elif license == 'CC BY-SA': + return 'You are free to: copy and redistribute the material in any medium or format; remix, transform, and build upon the material; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.' 
+ elif license == 'CC BY-ND': + return 'You are free to: copy and redistribute the material in any medium or format; for any purpose, even commercially. Under the following terms: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you may not distribute the modified material.' + elif license == 'GFDL': + return 'The purpose of this License is to make a manual, textbook, or other functional and useful document "free" in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others.' + elif license == 'LAL': + return 'Avec la Licence Art Libre, l\'autorisation est donnée de copier, de diffuser et de transformer librement les œuvres dans le respect des droits de l\'auteur.' + elif license == 'OSI': + return 'Open source licenses are licenses that comply with the Open Source Definition — in brief, they allow software to be freely used, modified, and shared. To be approved by the Open Source Initiative (also known as the OSI), a license must go through the Open Source Initiative\'s license review process.' 
+ return '' class ccinfo(object): def __init__(self, license): - value=license_value(license) - self.license=value if value else license - + value = license_value(license) + self.license = value if value else license + @property def description(self): return description(self.license) @@ -127,29 +124,28 @@ def url(self): def full_title(self): if self.license in LICENSE_LIST_ALL: return INFO_ALL[LICENSE_LIST_ALL.index(self.license)][2] - else: - return self.license + return self.license @property def title(self): if self.license in LICENSE_LIST_ALL: return INFO_ALL[LICENSE_LIST_ALL.index(self.license)][4] - else: - return self.license + return self.license @property def is_cc(self): return self.license in LICENSE_LIST @property def is_pd(self): return self.license == 'PD-US' - + def __str__(self): return self.license def license_value(facet): if facet in FACET_LIST: return LICENSE_LIST_ALL[FACET_LIST.index(facet)] - else: - return '' + return '' + +MATCH_CC_LICENSE = re.compile(r' BY(-NC)?(-ND|-SA)? 
') def match_license(license_string): if license_string in LICENSE_LIST_ALL: @@ -164,6 +160,9 @@ def match_license(license_string): return INFO_ALL[l][0] except ValueError: pass + lic = MATCH_CC_LICENSE.search(license_string) + if lic: + return 'CC{}'.format(lic.group(0).upper()).strip() return RIGHTS_ALIAS.get(license_string, None) MATCH_LICENSE = re.compile(r'creativecommons.org/licenses/([^/]+)/') @@ -172,8 +171,9 @@ def license_from_cc_url(rights_url): return None lic = MATCH_LICENSE.search(rights_url) if lic: - return 'CC {}'.format(lic.group(1).upper()) + license_string = 'CC {}'.format(lic.group(1).upper()) + if license_string in LICENSE_LIST_ALL: + return license_string if rights_url.find('openedition.org') >= 0: return 'OPENEDITION' return '' - diff --git a/core/covers.py b/core/covers.py new file mode 100644 index 000000000..143f31f12 --- /dev/null +++ b/core/covers.py @@ -0,0 +1,128 @@ +""" handle caching and thumbnailing of covers """ + +import logging + +from django.utils.functional import LazyObject + +import sorl + +from sorl.thumbnail import get_thumbnail as sorl_get_thumbnail + +from sorl.thumbnail.base import ThumbnailBackend +from sorl.thumbnail.conf import settings, defaults as default_settings +from sorl.thumbnail.helpers import get_module_class +from sorl.thumbnail.images import BaseImageFile, ImageFile +from sorl.thumbnail import default + +from celery.utils.log import get_logger +celerylogger = get_logger(__name__) + +import regluit + +logger = logging.getLogger(__name__) + +DEFAULT_COVER_LARGE = '/static/images/generic_cover_full.png' +DEFAULT_COVER = '/static/images/generic_cover_larger.png' +DEFAULT_COVER_SMALL = '/static/images/generic_cover_thumb.png' + +_storage = None + +class Storage(LazyObject): + ''' + Monkey patch to fix S3 backend slowness in sorl.thumbnail + https://github.com/jazzband/sorl-thumbnail/issues/301 + ''' + def _setup(self): + global _storage + if not _storage: + _storage = 
get_module_class(settings.THUMBNAIL_STORAGE)() + + self._wrapped = _storage + +sorl.thumbnail.default.storage = Storage() + + +class DefaultImageFile(BaseImageFile): + is_default = True + + def __init__(self, geometry_string='x550'): + if geometry_string == '128': + self._url = DEFAULT_COVER + self.size = (131, 192) + elif geometry_string == 'x80': + self._url = DEFAULT_COVER_SMALL + self.size = (55, 80) + else: + self._url = DEFAULT_COVER_LARGE + self.size = (376, 550) + + @property + def url(self): + return self._url + + def exists(self): + return True + +class ReadOnlyThumbnailBackend(ThumbnailBackend): + """ + A backend that never makes a new thumbnail, but adds missing thumbnails to a task queue + """ + + def get_thumbnail(self, file_, geometry_string, **options): + """ + Returns thumbnail as an ImageFile instance for file with geometry and + options given. It will try to get it from the key value store, + otherwise return a Dummy. + """ + logger.debug('Getting thumbnail for file [%s] at [%s]', file_, geometry_string) + + if file_: + source = ImageFile(file_) + else: + raise ValueError('falsey file_ argument in get_thumbnail()') + + # preserve image filetype + if settings.THUMBNAIL_PRESERVE_FORMAT: + options.setdefault('format', self._get_format(source)) + + for key, value in self.default_options.items(): + options.setdefault(key, value) + + for key, attr in self.extra_options: + value = getattr(settings, attr) + if value != getattr(default_settings, attr): + options.setdefault(key, value) + + name = self._get_thumbnail_filename(source, geometry_string, options) + thumbnail = ImageFile(name, default.storage) + cached = default.kvstore.get(thumbnail) + + if cached: + setattr(cached, 'is_default', False) + return cached + + logger.info('tasking a new thumbnail for %s, %s', file_, geometry_string) + args = [file_, geometry_string] + try: + regluit.core.tasks.make_cover_thumbnail.apply_async( + args=args, kwargs=options, retry=False) + except 
regluit.core.tasks.make_cover_thumbnail.OperationalError as exc: + logger.error('failed new thumbnail for %s, %s', file_, geometry_string) + celerylogger.exception('Sending task raised: %r', exc) + return DefaultImageFile(geometry_string) + + +backend = ReadOnlyThumbnailBackend() +get_thumbnail = backend.get_thumbnail + +def make_cover_thumbnail(url, geometry_string, **options): + try: + im = sorl_get_thumbnail(url, geometry_string, **options) + except (IOError, OSError): + logger.error('couldnt make thumbnail for %s, probably UnidentifiedImageError', url) + return False + + if im.exists(): + return True + logger.error('couldnt make thumbnail for %s, got non-existent im', url) + return False diff --git a/core/epub.py b/core/epub.py index 27dc44ce2..7aa71a95c 100644 --- a/core/epub.py +++ b/core/epub.py @@ -1,27 +1,26 @@ """ Utilities that manipulate epub files """ - -from regluit.pyepub import EPUB, InvalidEpub -from StringIO import StringIO +from io import StringIO, BytesIO +from pyepub import EPUB, InvalidEpub from django.template.loader import render_to_string def personalize(epub_file, acq): output = EPUB(epub_file, "a") context={'acq':acq} - part = StringIO(unicode(render_to_string('epub/datedcc_license.xhtml', context))) + part = StringIO(str(render_to_string('epub/datedcc_license.xhtml', context))) output.addpart(part, "datedcc_license.xhtml", "application/xhtml+xml", 1) #after title, we hope output.addmetadata('rights','%s after %s'%(acq.work.last_campaign().license_url,acq.work.last_campaign().cc_date)) - personalized_epub= StringIO() + personalized_epub= BytesIO() output.writetodisk(personalized_epub) #logger.info("personalized") return personalized_epub def ask_epub(epub_file, context): output = EPUB(epub_file, "a") - part = StringIO(unicode(render_to_string('epub/ask.xhtml', context))) + part = StringIO(str(render_to_string('epub/ask.xhtml', context))) output.addpart(part, "ask.xhtml", "application/xhtml+xml", 1) #after title, we hope - asking_epub= 
StringIO() + asking_epub = BytesIO() output.writetodisk(asking_epub) return asking_epub @@ -29,7 +28,7 @@ def ask_epub(epub_file, context): def ungluify(epub_file, campaign): output = EPUB(epub_file, "a") context={'campaign':campaign} - part = StringIO(unicode(render_to_string('epub/cc_license.xhtml', context))) + part = StringIO(str(render_to_string('epub/cc_license.xhtml', context))) output.addpart(part, "cc_license.xhtml", "application/xhtml+xml", 1) #after title, we hope output.addmetadata('rights', campaign.license_url) output.close() diff --git a/core/facets.py b/core/facets.py index c282455c1..4efd33f4d 100644 --- a/core/facets.py +++ b/core/facets.py @@ -2,6 +2,7 @@ from django.contrib.auth.models import User from django.db.models import Q from regluit.core import cc +from regluit.core.parameters import MAX_FACETS, ORDER_BY_KEYS class BaseFacet(object): facet_name = 'all' @@ -23,18 +24,18 @@ def _filter_model(self, model, query_set): else: return query_set - def __unicode__(self): + def __str__(self): if self.facet_name == 'all': return 'Free eBooks' - return unicode(self.facet_name) + return str(self.facet_name) @property def title(self): - return self.__unicode__() + return self.__str__() @property def label(self): - return self.__unicode__() + return self.__str__() def get_query_set(self): return self._get_query_set() @@ -66,10 +67,15 @@ def template(self): _stash_others = None def get_other_groups(self): + used = self.facets() + if len(used) >= MAX_FACETS: + # don't show more facets + return [] + if self._stash_others != None: return self._stash_others + others = [] - used = self.facets() for group in facet_groups: in_use = False for facet in used: @@ -78,12 +84,12 @@ def get_other_groups(self): break if not in_use: others.append(group) - self._stash_others=others + self._stash_others = others return others @property def description(self): - return self.__unicode__() + return self.__str__() class FacetGroup(object): # a FacetGroup should implement 
title, facets, has_facet(self, facet_name) and get_facet_class(self, facet_name) @@ -106,7 +112,7 @@ class FormatFacetGroup(FacetGroup): def __init__(self): super(FacetGroup,self).__init__() self.title = 'Format' - self.facets = ['pdf', 'epub', 'mobi'] + self.facets = ['pdf', 'epub'] self.label = '{} is ...'.format(self.title) def get_facet_class(self, facet_name): @@ -268,10 +274,10 @@ def set_name(self): self.term=self.facet_name[2:] def get_query_set(self): return self._get_query_set().filter( - Q(title__icontains=self.term) | - Q(editions__authors__name__icontains=self.term) | + Q(title__icontains=self.term) | + Q(editions__authors__name__icontains=self.term) | Q(subjects__name__iexact=self.term) - ) + ) def template(self): return 'facets/search.html' @@ -388,19 +394,10 @@ def get_all_facets(group='all'): def get_facet_object(facet_path): facets = facet_path.replace('//','/').strip('/').split('/') facet_object = None - for facet in facets: + for facet in facets[:MAX_FACETS]: facet_object = get_facet(facet)(facet_object) return facet_object -order_by_keys = { - 'newest':['-featured', '-created'], - 'oldest':['created'], - 'featured':['-featured', '-num_wishes'], - 'popular':['-num_wishes'], - 'title':['title'], - 'none':[], #no ordering -} - def get_order_by(order_by_key): # return the args to use as arguments for order_by - return order_by_keys.get(order_by_key,'') \ No newline at end of file + return ORDER_BY_KEYS.get(order_by_key,'') \ No newline at end of file diff --git a/core/fixtures/basic_campaign_test.json b/core/fixtures/basic_campaign_test.json index cdc498a22..f9ba28a59 100644 --- a/core/fixtures/basic_campaign_test.json +++ b/core/fixtures/basic_campaign_test.json @@ -48,7 +48,7 @@ "groups": [], "user_permissions": [], "password": "pbkdf2_sha256$10000$sILqnpDfTw8Z$djqLomeFeVJIEEqAbp+YqXVOVKI0onS6OwJvpiTEe2g=", - "email": "raymond.yee2@example.org", + "email": "openurl@example.org", "date_joined": "2012-10-14T10:03:43" } }, @@ -56,16 +56,9 @@ "pk": 
1, "model": "core.userprofile", "fields": { - "goodreads_auth_secret": null, - "goodreads_user_name": null, "created": "2012-10-12T22:58:33", "tagline": "", - "twitter_id": "", - "goodreads_user_id": null, - "goodreads_auth_token": null, - "goodreads_user_link": null, "user": 1, - "facebook_id": null, "librarything_id": "", "home_url": "", "pic_url": "", @@ -76,16 +69,9 @@ "pk": 2, "model": "core.userprofile", "fields": { - "goodreads_auth_secret": null, - "goodreads_user_name": null, "created": "2012-10-14T09:57:15", "tagline": "", - "twitter_id": "", - "goodreads_user_id": null, - "goodreads_auth_token": null, - "goodreads_user_link": null, "user": 2, - "facebook_id": null, "librarything_id": "", "home_url": "", "pic_url": "", @@ -96,16 +82,9 @@ "pk": 3, "model": "core.userprofile", "fields": { - "goodreads_auth_secret": null, - "goodreads_user_name": null, "created": "2012-10-14T10:03:43", "tagline": "", - "twitter_id": "", - "goodreads_user_id": null, - "goodreads_auth_token": null, - "goodreads_user_link": null, "user": 3, - "facebook_id": null, "librarything_id": "", "home_url": "", "pic_url": "", @@ -549,7 +528,7 @@ "name": "Moby Dick", "edition": null, "amazon_receiver": "", - "deadline": "2023-04-12T23:59:00", + "deadline": "2033-04-12T23:59:00", "details": "

    \r\n\tThe book is already in the public domain, but let's do this again.

    \r\n", "left": "15000.00", "target": "15000.00" diff --git a/core/goodreads.py b/core/goodreads.py deleted file mode 100644 index 6b1e4c988..000000000 --- a/core/goodreads.py +++ /dev/null @@ -1,320 +0,0 @@ -""" -external library imports -""" -import httplib -import json -import logging -import oauth2 as oauth -import re - -from itertools import islice -from requests import request -from urllib import urlencode -from urlparse import urlparse, urlunparse, urljoin -from xml.etree import ElementTree as ET - -""" -django imports -""" -import django.utils.encoding - -""" -regluit imports -""" -import regluit.core -from regluit.core import bookloader, models - -# import parse_qsl from cgi if it doesn't exist in urlparse -try: - from urlparse import parse_qsl -except: - from cgi import parse_qsl - -from django.conf import settings - -logger = logging.getLogger(__name__) - -# QUESTION: should the request_token, access_token be part of the state of the client? -# for simplicity for now, I will make them part of the state of GoodReadsClient - -class GoodreadsException(Exception): - pass - -class GoodreadsAuthorizationRequired(GoodreadsException): - pass - -def filter_none(d): - d2 = {} - for (k,v) in d.iteritems(): - if v is not None: - d2[k] = v - return d2 - -def safe_strip(a_string): - try: - return a_string.strip() - except: - return '' - -class GoodreadsClient(object): - - url = 'https://www.goodreads.com' - request_token_url = urljoin(url,'oauth/request_token') - authorize_url = urljoin(url, '/oauth/authorize') - access_token_url = urljoin(url,'/oauth/access_token') - - def __init__(self,key,secret,user=None, access_token=None): - self.key = key - self.secret = secret - self.consumer = oauth.Consumer(key=self.key, - secret=self.secret) - - self.client = oauth.Client(self.consumer) - #self.unauth_client = None - - if access_token is not None: - self.__load_access_token(access_token) - else: - self.access_token = None - - if user is not None: - 
self.load_user_access_token(user) - - @property - def is_authorized(self): - return (self.access_token is not None) - - def begin_authorization (self, callback_url=None): - # get request token - response, content = self.client.request(GoodreadsClient.request_token_url, 'GET') - - if int(response['status']) != httplib.OK: - raise Exception('Invalid response: %s' % response['status']) - - request_token = dict(parse_qsl(content)) - - q = {'oauth_token':request_token['oauth_token']} - if callback_url is not None: - q['oauth_callback'] = callback_url - - authorize_link = GoodreadsClient.authorize_url + '?' + urlencode(q) - return (authorize_link, request_token) - - def complete_authorization(self, request_token): - token = oauth.Token(request_token['oauth_token'], - request_token['oauth_token_secret']) - - self.client = oauth.Client(self.consumer, token) - response, content = self.client.request(GoodreadsClient.access_token_url, 'POST') - if int(response['status']) != httplib.OK: - raise Exception('Invalid response: %s' % response['status']) - - access_token_raw = dict(parse_qsl(content)) - self.__load_access_token(access_token_raw) - return access_token_raw - - def load_user_access_token(self,user): - access_token = {'oauth_token':user.profile.goodreads_auth_token, - 'oauth_token_secret':user.profile.goodreads_auth_secret} - self.__load_access_token(access_token) - - def __load_access_token(self, access_token): - token = oauth.Token(access_token['oauth_token'], - access_token['oauth_token_secret']) - self.access_token = token - self.client = oauth.Client(self.consumer, self.access_token) - - def __clear_access_token(self): - self.access_token = None - self.consumer = oauth.Consumer(key=self.key, - secret=self.secret) - - def auth_user(self): - if self.is_authorized: - response, content = self.client.request('%s/api/auth_user' % GoodreadsClient.url, - 'GET') - if int(response['status']) != httplib.OK: - raise GoodreadsException('Error authenticating Goodreads user ' ) - 
else: - doc = ET.fromstring(content) - user = doc.find('user') - userid = user.get('id') - name = user.find('name').text - link = user.find('link').text - return({'userid':userid, 'name':name, 'link':link}) - else: - raise GoodreadsAuthorizationRequired('Attempt to access auth_user without authorization.') - - def add_book(self, book_id=871441, shelf_name='to-read'): - # the book is: "Moby-Dick: A Pop-Up Book" 871441 - body = urlencode({'name': 'to-read', 'book_id': book_id}) - headers = {'content-type': 'application/x-www-form-urlencoded'} - response, content = self.client.request('%s/shelf/add_to_shelf.xml' % GoodreadsClient.url, - 'POST', body, headers) - # check that the new resource has been created - if int(response['status']) != httplib.CREATED: - raise GoodreadsException('Cannot create resource: %s' % response['status']) - logger.info('response,content: %s | %s ' % (response,content)) - else: - return True - - def review_list_unauth(self, user_id, shelf='all',page=1,sort=None,per_page=20,order='a',search=None,v=2): - path="/review/list.xml" - method = "GET" - params = filter_none({'id':user_id,'shelf':shelf,'page':page,'sort':sort,'per_page':per_page,'order':order, - 'search':search, 'v':2}) - params["key"] = self.key - - request_url = urljoin(GoodreadsClient.url, path) - logger.info("request_url:{0}, params: {1}".format(request_url, params)) - - more_pages = True - - while (more_pages): - - r = request(method,request_url,params=params) - # print request_url, params - if r.status_code != httplib.OK: - raise GoodreadsException('Error in review_list_unauth, http status_code: {0}'.format(r.status_code)) - else: - doc = ET.fromstring(r.content) - # for the moment convert to a iterable of book data presented as dict -- one the way to paging through all results - reviews = doc.findall('reviews/review') - for review in reviews: - yield ({'id':review.find('id').text, - 'book': {'id': safe_strip(review.find('book/id').text), - 'isbn10': 
review.find('book/isbn').text, - 'isbn13': review.find('book/isbn13').text, - 'title': safe_strip(review.find('book/title').text), - 'text_reviews_count': safe_strip(review.find('book/text_reviews_count').text), - 'link': safe_strip(review.find('book/link').text), - 'small_image_url': safe_strip(review.find('book/small_image_url').text), - 'ratings_count': safe_strip(review.find('book/ratings_count').text), - 'description': safe_strip(review.find('book/description').text)} - }) - if len(reviews) == 0: - more_pages = False - else: - params["page"] += 1 - - - def review_list(self, user_id, shelf='all',page=1,sort=None,per_page=20,order='a',search=None,v=2): - """have to account for situation in which we might need authorized access - for now: assume no need for auth - sort: available_for_swap, position, num_pages, votes, recommender, rating, shelves, format, - avg_rating, date_pub, isbn, comments, author, title, notes, cover, isbn13, review, date_pub_edition, - condition, asin, date_started, owned, random, date_read, year_pub, read_count, date_added, - date_purchased, num_ratings, purchase_location, date_updated (optional) - """ - - path="/review/list.xml" - method = "GET" - params = filter_none({'id':user_id,'shelf':shelf,'page':page,'sort':sort,'per_page':per_page,'order':order, - 'search':search, 'v':2}) - - request_url = urljoin(GoodreadsClient.url, path) - - more_pages = True - - while (more_pages): - - response, content = self.client.request('%s?%s' % (request_url, urlencode(params)), - method) - if int(response['status']) != httplib.OK: - raise GoodreadsException('Error in review_list: ' ) - else: - #logger.info(' %s' % (content)) - doc = ET.fromstring(content) - # for the moment convert to a iterable of book data presented as dict -- one the way to paging through all results - reviews = doc.findall('reviews/review') - for review in reviews: - yield ({'id':review.find('id').text, - 'book': {'id': safe_strip(review.find('book/id').text), - 
'isbn10':review.find('book/isbn').text, - 'isbn13':review.find('book/isbn13').text, - 'title':safe_strip(review.find('book/title').text), - 'text_reviews_count':safe_strip(review.find('book/text_reviews_count').text), - 'link':safe_strip(review.find('book/link').text), - 'small_image_url':safe_strip(review.find('book/small_image_url').text), - 'ratings_count':safe_strip(review.find('book/ratings_count').text), - 'description':safe_strip(review.find('book/description').text)} - }) - if len(reviews) == 0: - more_pages = False - else: - params["page"] += 1 - - def shelves_list(self,user_id,page=1): - """BUG to fix: should go through all the pages, not just page 1 - """ - path = "/shelf/list.xml" - params = {'user_id':user_id, 'page':page} - params["key"] = self.key - method = "GET" - request_url = urljoin(GoodreadsClient.url, path) - - r = request(method,request_url,params=params) - - if r.status_code != httplib.OK: - raise GoodreadsException('Error in shelves_list: %s ' % (r.headers)) - else: - logger.info('headers: %s' % (r.headers)) - doc = ET.fromstring(r.content) - shelves = doc.find('shelves') - # do a simple parsing to a dictionary - - d = dict( [ (k,int(shelves.attrib[k])) for k in shelves.attrib ] ) - d["user_shelves"] = [{'name':shelf.find('name').text, - 'book_count':int(shelf.find('book_count').text), - 'description':shelf.find('description').text if shelf.find('description').attrib['nil'] != 'true' else None, - 'exclusive_flag':shelf.find('exclusive_flag').text} \ - for shelf in shelves.findall('user_shelf')] - - d["total_book_count"] = sum([shelf['book_count'] if shelf['exclusive_flag'] == 'true' else 0 for shelf in d["user_shelves"]]) - return d - - -def load_goodreads_shelf_into_wishlist(user, shelf_name='all', goodreads_user_id=None, max_books=None, expected_number_of_books=None): - """ - Load a specified Goodreads shelf (by default: all the books from the Goodreads account associated with user) - """ - - logger.info('Entering 
load_goodreads_shelf_into_wishlist. user: %s, shelf_name: %s, goodreads_user_id: %s, max_books: %s, expected_number_of_books: %s', - user, shelf_name, goodreads_user_id, max_books, expected_number_of_books) - gc = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET, user=user) - - if goodreads_user_id is None: - if user.profile.goodreads_user_id is not None: - goodreads_user_id = user.profile.goodreads_user_id - else: - raise Exception("No Goodreads user_id is associated with user.") - - logger.info('computed goodreads_user_id: %s ', goodreads_user_id) - - for (i, review) in enumerate(islice(gc.review_list(goodreads_user_id,shelf=shelf_name),max_books)): - isbn = review["book"]["isbn10"] if review["book"]["isbn10"] is not None else review["book"]["isbn13"] - logger.info("%d %s %s %s ", i, review["book"]["title"], isbn, review["book"]["small_image_url"]) - try: - edition = bookloader.add_by_isbn(isbn) - if not edition: - continue - # save the goodreads id since we know it at this point - # we need to extract it from the link since review['id'] - # is the id for a users review, not the book - link = review['book']['link'] - match = re.search('/show/(\d+)', link) - if match: - identifier= models.Identifier.get_or_add(type = 'gdrd', value = match.group(1), edition = edition, work = edition.work) - user.wishlist.add_work(edition.work, 'goodreads', notify=True) - logger.info("Work with isbn %s added to wishlist.", isbn) - else: - logger.error("unable to extract goodreads id from %s", link) - if edition.new: - regluit.core.tasks.populate_edition.delay(edition.isbn_13) - - except Exception, e: - logger.info ("Exception adding ISBN %s: %s", isbn, e) - - logger.info('Leaving load_goodreads_shelf_into_wishlist. 
Length of wishlist for user %s is %s', user, len(user.wishlist.works.all())) - - return user.wishlist diff --git a/core/isbn.py b/core/isbn.py index 9661cf6d2..0d1586de2 100644 --- a/core/isbn.py +++ b/core/isbn.py @@ -165,8 +165,8 @@ def to_string(self, type='13', hyphenate=False): return "%s-%s-%s-%s-%s" % (s[0:3], s[3], s[4:7], s[7:12], s[12]) else: return self.__isbn13 - def __unicode__(self): - return unicode(self.to_string(type=self.type, hyphenate=False)) + def __str__(self): + return str(self.to_string(type=self.type, hyphenate=False)) def __str__(self): s = self.to_string(type=self.type, hyphenate=False) if s is not None: diff --git a/core/librarything.py b/core/librarything.py index ba38f1fe3..57141f050 100644 --- a/core/librarything.py +++ b/core/librarything.py @@ -1,6 +1,6 @@ import csv -import HTMLParser -import httplib +from html import parser as HTMLParser +import http.client as httplib import logging import re from datetime import datetime @@ -140,7 +140,7 @@ def viewstyle_5(self, rows): # lc classification try: book_data["lc_call_number"] = cols[2].xpath('.//span')[0].text - except Exception, e: + except Exception as e: logger.info("no lc call number for: %s %s", book_data["title"], e) book_data["lc_call_number"] = None @@ -156,7 +156,7 @@ def viewstyle_5(self, rows): # check for   if book_data["isbn"] == u'\xA0': book_data["isbn"] = None - except Exception, e: + except Exception as e: book_data["isbn"] = None yield book_data @@ -203,7 +203,7 @@ def parse_user_catalog(self, view_style=1): count_text = etree.xpath('//td[@class="pbGroup"]')[0].text total = int(re.search(r'(\d+)$', count_text).group(1)) logger.info('total: %d', total) - except Exception, e: + except Exception as e: # assume for now that if we can't grab this text, # there is no page bar and no books logger.info('Exception {0}'.format(e)) @@ -277,5 +277,5 @@ def load_librarything_into_wishlist(user, lt_username, max_books=None): if edition.new: 
tasks.populate_edition.delay(edition.isbn_13) logger.info("Work with isbn %s added to wishlist.", isbn) - except Exception, e: + except Exception as e: logger.info("error adding ISBN %s: %s", isbn, e) diff --git a/core/loaders/__init__.py b/core/loaders/__init__.py index e47ca29ba..0adc6762e 100755 --- a/core/loaders/__init__.py +++ b/core/loaders/__init__.py @@ -1,3 +1,5 @@ +import logging +from ssl import SSLError import requests from bs4 import BeautifulSoup @@ -9,9 +11,12 @@ from .scrape import BaseScraper from .hathitrust import HathitrustScraper from .pressbooks import PressbooksScraper +from .routledge import RoutledgeScraper from .springer import SpringerScraper -from .ubiquity import UbiquityScraper from .smashwords import SmashwordsScraper +from .ubiquity import UbiquityScraper + +logger = logging.getLogger(__name__) def get_scraper(url): scrapers = [ @@ -20,6 +25,7 @@ def get_scraper(url): UbiquityScraper, SmashwordsScraper, HathitrustScraper, + RoutledgeScraper, BaseScraper, ] for scraper in scrapers: @@ -29,32 +35,43 @@ def get_scraper(url): def scrape_sitemap(url, maxnum=None): try: response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) - doc = BeautifulSoup(response.content, 'lxml') + doc = BeautifulSoup(response.content, 'xml') for page in doc.find_all('loc')[0:maxnum]: scraper = get_scraper(page.text) if scraper.metadata.get('genre', None) == 'book': yield scraper except requests.exceptions.RequestException as e: logger.error(e) + except SSLError as e: + logger.error(e) -def add_by_webpage(url, work=None, user=None): - edition = None - scraper = get_scraper(url) - loader = BasePandataLoader(url) +def add_by_metadata(metadata, url='', work=None, user=None): pandata = Pandata() - pandata.metadata = scraper.metadata + loader = BasePandataLoader(url) + pandata.metadata = metadata for metadata in pandata.get_edition_list(): edition = loader.load_from_pandata(metadata, work) - work = edition.work + if hasattr(edition, 'work'): + work = 
edition.work + else: + return None loader.load_ebooks(pandata, edition, user=user) return edition if edition else None - +def add_by_webpage(url, work=None, user=None): + if not url: + return None + edition = None + scraper = get_scraper(url) + return add_by_metadata(scraper.metadata, url=url, work=None, user=None) + + def add_by_sitemap(url, maxnum=None): return add_from_bookdatas(scrape_sitemap(url, maxnum=maxnum)) def scrape_language(url): scraper = get_scraper(url) - return scraper.metadata.get('language') + language = scraper.metadata.get('language') + return language if language else 'xx' diff --git a/core/loaders/doab.py b/core/loaders/doab.py index 6d3643286..99d33bff6 100644 --- a/core/loaders/doab.py +++ b/core/loaders/doab.py @@ -1,30 +1,33 @@ #!/usr/bin/env python # encoding: utf-8 import datetime -import json import logging import re import requests +from io import BytesIO +from PIL import Image, UnidentifiedImageError + +from django.conf import settings from django.db.models import Q from django.core.files.base import ContentFile from django.core.files.storage import default_storage from oaipmh.client import Client -from oaipmh.error import IdDoesNotExistError -from oaipmh.metadata import MetadataRegistry, oai_dc_reader +from oaipmh.error import IdDoesNotExistError, NoRecordsMatchError +from oaipmh.metadata import MetadataRegistry from regluit.core import bookloader, cc from regluit.core import models, tasks from regluit.core.bookloader import merge_works -from regluit.core.isbn import ISBN -from regluit.core.loaders.utils import type_for_url -from regluit.core.validation import identifier_cleaner, valid_subject +from regluit.core.models.loader import type_for_url +from regluit.core.validation import identifier_cleaner, valid_subject, explode_bics from . 
import scrape_language -from .doab_utils import doab_lang_to_iso_639_1, online_to_download, url_to_provider +from .doab_utils import ( + doab_lang_to_iso_639_1, doab_cover, doab_reader, online_to_download, STOREPROVIDERS) logger = logging.getLogger(__name__) @@ -33,7 +36,6 @@ def unlist(alist): return None return alist[0] - SPRINGER_COVER = re.compile(r'ftp.+springer\.de.+(\d{13}\.jpg)$', flags=re.U) SPRINGER_IMAGE = u'https://images.springer.com/sgw/books/medium/{}.jpg' def store_doab_cover(doab_id, redo=False): @@ -41,8 +43,10 @@ def store_doab_cover(doab_id, redo=False): """ returns tuple: 1) cover URL, 2) whether newly created (boolean) """ + if not doab_id: + return (None, False) - cover_file_name = '/doab/%s/cover' % (doab_id) + cover_file_name = '/doab/%s' % doab_id # if we don't want to redo and the cover exists, return the URL of the cover @@ -50,28 +54,47 @@ def store_doab_cover(doab_id, redo=False): return (default_storage.url(cover_file_name), False) # download cover image to cover_file - url = "http://www.doabooks.org/doab?func=cover&rid={0}".format(doab_id) + url = doab_cover(doab_id) + headers = {"User-Agent": settings.USER_AGENT} + if not url: + return (None, False) try: - r = requests.get(url, allow_redirects=False) # requests doesn't handle ftp redirects. + r = requests.get(url, allow_redirects=False, headers=headers) # requests doesn't handle ftp redirects. 
if r.status_code == 302: redirurl = r.headers['Location'] if redirurl.startswith(u'ftp'): springerftp = SPRINGER_COVER.match(redirurl) if springerftp: redirurl = SPRINGER_IMAGE.format(springerftp.groups(1)) - r = requests.get(redirurl) + r = requests.get(redirurl, headers=headers) else: - r = requests.get(url) - else: - r = requests.get(url) + r = requests.get(url, headers=headers) + if not r.content: + logger.warning('No image content for doab_id=%s', doab_id) + return (None, False) + + #test that cover is good + image_bytes = BytesIO(r.content) + try: + image = Image.open(image_bytes) + except UnidentifiedImageError: + logger.warning(f'No image found for {doab_id}') + return (None, False) + cover_file = ContentFile(r.content) - cover_file.content_type = r.headers.get('content-type', '') + content_type = r.headers.get('content-type', '') + if not 'image/' in content_type: + logger.warning('Non-image returned for doab_id=%s', doab_id) + return (None, False) + cover_file.content_type = content_type + + default_storage.save(cover_file_name, cover_file) return (default_storage.url(cover_file_name), True) - except Exception, e: + except Exception as e: # if there is a problem, return None for cover URL - logger.warning('Failed to make cover image for doab_id={}: {}'.format(doab_id, e)) + logger.warning('Failed to make cover image for doab_id=%s: %s', doab_id, e) return (None, False) def update_cover_doab(doab_id, edition, store_cover=True, redo=True): @@ -82,16 +105,23 @@ def update_cover_doab(doab_id, edition, store_cover=True, redo=True): if store_cover: (cover_url, new_cover) = store_doab_cover(doab_id, redo=redo) else: - cover_url = "http://www.doabooks.org/doab?func=cover&rid={0}".format(doab_id) + cover_url = doab_cover(doab_id) if cover_url is not None: edition.cover_image = cover_url edition.save() + good = edition.cover_image_small() and edition.cover_image_thumbnail() + if not good: + # oh well + logger.warning("Couldn't make thumbnails for %s using %s", 
doab_id, cover_url) + edition.cover_image = None + edition.save() return cover_url return None def attach_more_doab_metadata(edition, description, subjects, - publication_date, publisher_name=None, language=None, authors=u''): + publication_date, publisher_name=None, language=None, + dois=None, authors=None, editors=None): """ for given edition, attach description, subjects, publication date to @@ -109,10 +139,11 @@ def attach_more_doab_metadata(edition, description, subjects, # attach description to work if it's not empty work = edition.work - if not work.description: - work.description = description + if description and not work.description: + work.description = description.replace('\r\n', '\n') # update subjects + subjects = explode_bics(subjects) for s in subjects: if valid_subject(s): models.Subject.set_by_name(s, work=work) @@ -125,14 +156,18 @@ def attach_more_doab_metadata(edition, description, subjects, work.language = language work.save() - if authors and authors == authors: # test for authors != NaN - authlist = creator_list(authors) + if authors or editors: + authlist = creator_list(authors, editors) if edition.authors.all().count() < len(authlist): edition.authors.clear() if authlist is not None: for [rel, auth] in authlist: edition.add_author(auth, rel) + for doi in dois if dois else []: + if not edition.work.doi: + models.Identifier.set('doi', doi, work=edition.work) + break return edition def add_all_isbns(isbns, work, language=None, title=None): @@ -152,13 +187,12 @@ def add_all_isbns(isbns, work, language=None, title=None): return work, first_edition def load_doab_edition(title, doab_id, url, format, rights, - language, isbns, - provider, **kwargs): - + language, isbns, provider, dois=None, **kwargs): """ load a record from doabooks.org represented by input parameters and return an ebook """ - logger.info('load doab {} {} {} {} {}'.format(doab_id, format, rights, language, provider)) + logger.info('load doab %s %s %s %s %s', doab_id, format, 
rights, language, provider) + url = url.strip() if language and isinstance(language, list): language = language[0] if language == 'xx' and format == 'online': @@ -181,31 +215,34 @@ def load_doab_edition(title, doab_id, url, format, rights, ebook = None if len(ebooks) > 1: raise Exception("There is more than one Ebook matching url {0}".format(url)) - elif len(ebooks) == 1: + if len(ebooks) == 1: ebook = ebooks[0] - doab_identifer = models.Identifier.get_or_add(type='doab', value=doab_id, - work=ebook.edition.work) - if not ebook.rights: - ebook.rights = rights - ebook.save() - - # update the cover id - cover_url = update_cover_doab(doab_id, ebook.edition, redo=False) - - # attach more metadata - attach_more_doab_metadata( - ebook.edition, - description=unlist(kwargs.get('description')), - subjects=kwargs.get('subject'), - publication_date=unlist(kwargs.get('date')), - publisher_name=unlist(kwargs.get('publisher')), - language=language, - authors=kwargs.get('creator'), - ) - # make sure all isbns are added - add_all_isbns(isbns, ebook.edition.work, language=language, title=title) - return ebook.edition - + if not ebook.edition.work.doab or ebook.edition.work.doab == doab_id: + models.Identifier.get_or_add(type='doab', value=doab_id, work=ebook.edition.work) + + if not ebook.rights: + ebook.rights = rights + ebook.save() + + # update the cover id + update_cover_doab(doab_id, ebook.edition, redo=False) + + # attach more metadata + attach_more_doab_metadata( + ebook.edition, + description=unlist(kwargs.get('description')), + subjects=kwargs.get('subject'), + publication_date=unlist(kwargs.get('date')), + publisher_name=unlist(kwargs.get('publisher')), + language=language, + authors=kwargs.get('creator'), + dois=dois, + ) + # make sure all isbns are added + add_all_isbns(isbns, ebook.edition.work, language=language, title=title) + return ebook.edition + # don't add a second doab to an existing Work + return None # remaining case --> no ebook, load record, create ebook if 
there is one. assert not ebooks @@ -254,7 +291,7 @@ def load_doab_edition(title, doab_id, url, format, rights, if editions_with_ebooks: edition = editions_with_ebooks[0] elif work.editions.all(): - edition = work.editions.all()[0] + edition = work.editions.first() else: edition = models.Edition(work=work, title=title) edition.save() @@ -263,9 +300,12 @@ def load_doab_edition(title, doab_id, url, format, rights, work.selected_edition = edition work.save() - if format in ('pdf', 'epub', 'mobi', 'html', 'online') and rights: + if format in ('pdf', 'epub', 'html', 'online') and rights: ebook = models.Ebook() - ebook.format = format + if format == 'online' and provider in STOREPROVIDERS: + ebook.format = 'bookstore' + else: + ebook.format = format ebook.provider = provider ebook.url = url ebook.rights = rights @@ -286,7 +326,15 @@ def load_doab_edition(title, doab_id, url, format, rights, publication_date=unlist(kwargs.get('date')), publisher_name=unlist(kwargs.get('publisher')), authors=kwargs.get('creator'), + editors=kwargs.get('editor'), + dois=dois, ) + if rights: + for ebook in edition.ebooks.all(): + if not ebook.rights: + ebook.rights = rights + ebook.save() + return edition # @@ -294,7 +342,8 @@ def load_doab_edition(title, doab_id, url, format, rights, # au = re.compile(r'\(Authors?\)', flags=re.U) -ed = re.compile(r'\([^\)]*(dir.|[Eeé]ds?.|org.|coord.|Editor|a cura di|archivist)[^\)]*\)', flags=re.U) +ed = re.compile(r'\([^\)]*(dir.|[Eeé]ds?.|org.|coord.|Editor|a cura di|archivist)[^\)]*\)', + flags=re.U) tr = re.compile(r'\([^\)]*([Tt]rans.|tr.|translated by)[^\)]*\)', flags=re.U) ai = re.compile(r'\([^\)]*(Introduction|Foreword)[^\)]*\)', flags=re.U) ds = re.compile(r'\([^\)]*(designer)[^\)]*\)', flags=re.U) @@ -311,14 +360,11 @@ def fnf(auth): parts = re.sub(r' +', u' ', auth).split(u',') if len(parts) == 1: return parts[0].strip() - elif len(parts) == 2: + if len(parts) == 2: return u'{} {}'.format(parts[1].strip(), parts[0].strip()) - else: - if 
parts[1].strip() in ('der', 'van', 'von', 'de', 'ter'): - return u'{} {} {}'.format(parts[2].strip(), parts[1].strip(), parts[0].strip()) - #print auth - #print re.search(namelist,auth).group(0) - return u'{} {}, {}'.format(parts[2].strip(), parts[0].strip(), parts[1].strip()) + if parts[1].strip() in ('der', 'van', 'von', 'de', 'ter'): + return u'{} {} {}'.format(parts[2].strip(), parts[1].strip(), parts[0].strip()) + return u'{} {}, {}'.format(parts[2].strip(), parts[0].strip(), parts[1].strip()) def creator(auth, editor=False): @@ -339,43 +385,59 @@ def creator(auth, editor=False): auth = au.sub('', auth) return ['aut', fnf(auth)] -def creator_list(creators): +def creator_list(creators, editors): auths = [] - for auth in creators: - auths.append(creator(auth)) + if creators: + for auth in creators: + auths.append(creator(auth)) + if editors: + for auth in editors: + auths.append(creator(auth, editor=True)) return auths -DOAB_OAIURL = 'https://www.doabooks.org/oai' -DOAB_PATT = re.compile(r'[\./]doabooks\.org/doab\?.*rid:(\d{1,8}).*') +DOAB_OAIURL = 'https://directory.doabooks.org/oai/request' +DOAB_PATT = re.compile(r'oai:directory\.doabooks\.org:(.*)') mdregistry = MetadataRegistry() -mdregistry.registerReader('oai_dc', oai_dc_reader) +mdregistry.registerReader('oai_dc', doab_reader) doab_client = Client(DOAB_OAIURL, mdregistry) isbn_cleaner = identifier_cleaner('isbn', quiet=True) -ISBNSEP = re.compile(r'[/]+') +doi_cleaner = identifier_cleaner('doi', quiet=True) +ISBNSEP = re.compile(r'[/;]+') def add_by_doab(doab_id, record=None): try: record = record if record else doab_client.getRecord( metadataPrefix='oai_dc', - identifier='oai:doab-books:{}'.format(doab_id) + identifier='oai:directory.doabooks.org:{}'.format(doab_id) ) + if not record[1]: + logger.error('No content in record %s', record) + return None metadata = record[1].getMap() isbns = [] - url = None + dois = [] + urls = [] + for ident in metadata.pop('isbn', []): + isbn_strings = 
ISBNSEP.split(ident[6:].strip()) + for isbn_string in isbn_strings: + isbn = isbn_cleaner(isbn_string) + if isbn: + isbns.append(isbn) + for ident in metadata.pop('doi', []): + ident = doi_cleaner(ident) + if ident: + dois.append(ident) for ident in metadata.pop('identifier', []): - if ident.startswith('ISBN: '): - isbn_strings = ISBNSEP.split(ident[6:].strip()) - for isbn_string in isbn_strings: - isbn = isbn_cleaner(isbn_string) - if isbn: - isbns.append(isbn) - elif ident.find('doabooks.org') >= 0: + if ident.find('doabooks.org') >= 0: # should already know the doab_id continue - else: - url = ident + if ident.startswith('http'): + urls.append(ident) language = doab_lang_to_iso_639_1(unlist(metadata.pop('language', None))) - urls = online_to_download(url) + xurls = [] + for url in urls: + xurls += online_to_download(url) + urls = xurls edition = None title = unlist(metadata.pop('title', None)) license = cc.license_from_cc_url(unlist(metadata.pop('rights', None))) @@ -383,7 +445,7 @@ def add_by_doab(doab_id, record=None): format = type_for_url(dl_url) if 'format' in metadata: del metadata['format'] - edition = load_doab_edition( + added_edition = load_doab_edition( title, doab_id, dl_url, @@ -391,25 +453,14 @@ def add_by_doab(doab_id, record=None): license, language, isbns, - url_to_provider(dl_url) if dl_url else None, - **metadata - ) - else: - if 'format' in metadata: - del metadata['format'] - edition = load_doab_edition( - title, - doab_id, - '', - '', - license, - language, - isbns, - None, + models.Ebook.infer_provider(dl_url) if dl_url else None, + dois=dois, **metadata ) + edition = added_edition if added_edition else edition return edition - except IdDoesNotExistError: + except IdDoesNotExistError as e: + logger.error(e) return None @@ -419,30 +470,52 @@ def getdoab(url): return id_match.group(1) return False -def load_doab_oai(from_year=None, limit=100000): + +def get_doab_record(doab_id): + record_id = 'oai:directory.doabooks.org:%s' % doab_id + try: 
+ return doab_client.getRecord(metadataPrefix='oai_dc', identifier=record_id) + except IdDoesNotExistError: + return None + +def load_doab_oai(from_date, until_date, limit=100): ''' use oai feed to get oai updates ''' - if from_year: - from_ = datetime.datetime(year=from_year, month=1, day=1) - else: + start = datetime.datetime.now() + if from_date: + from_ = from_date + else: # last 15 days from_ = datetime.datetime.now() - datetime.timedelta(days=15) - doab_ids = [] - for record in doab_client.listRecords(metadataPrefix='oai_dc', from_=from_): - if not record[1]: - continue - item_type = unlist(record[1].getMap().get('type', None)) - if item_type != 'book': - continue - idents = record[1].getMap()['identifier'] - if idents: - for ident in idents: - doab = getdoab(ident) - if doab: - doab_ids.append(doab) - e = add_by_doab(doab, record=record) - title = e.title if e else None - logger.info(u'updated:\t{}\t{}'.format(doab, title)) - if len(doab_ids) > limit: - break + num_doabs = 0 + new_doabs = 0 + lasttime = datetime.datetime(2000, 1, 1) + try: + for record in doab_client.listRecords(metadataPrefix='oai_dc', from_=from_, + until=until_date): + if not record[1]: + continue + item_type = unlist(record[1].getMap().get('type', None)) + if item_type != 'book': + continue + ident = record[0].identifier() + datestamp = record[0].datestamp() + lasttime = datestamp if datestamp > lasttime else lasttime + doab = getdoab(ident) + if doab: + num_doabs += 1 + e = add_by_doab(doab, record=record) + if not e: + logger.error('null edition for doab #%s', doab) + continue + if e.created > start: + new_doabs += 1 + title = e.title if e else None + logger.info(u'updated:\t%s\t%s', doab, title) + if num_doabs >= limit: + break + except NoRecordsMatchError: + pass + return num_doabs, new_doabs, lasttime + \ No newline at end of file diff --git a/core/loaders/doab_utils.py b/core/loaders/doab_utils.py index ceef8bb70..b7aacc09e 100644 --- a/core/loaders/doab_utils.py +++ 
b/core/loaders/doab_utils.py @@ -2,129 +2,155 @@ doab_utils.py """ - +import logging import re -import urlparse +from ssl import SSLError +from urllib.parse import urljoin import requests -from regluit.utils.lang import get_language_code -from .utils import get_soup - -# utility functions for converting lists of individual items into individual items - -# let's do a mapping of the DOAB languages into the language codes used -# mostly, we just handle mispellings -# also null -> xx - -EXTRA_LANG_MAP = dict([ - (u'chinese', 'de'), - (u'deutsch', 'de'), - (u'eng', 'en'), - (u'englilsh', 'en'), - (u'englilsh', 'en'), - (u'englisch', 'en'), - (u'espanol', 'es'), - (u'ger', 'de'), - (u'fra', 'fr'), - (u'fre', 'fr'), - (u'francese', 'fr'), - (u'ita', 'it'), - (u'italiano', 'it'), - (u'norwegian', 'no'), - (u'por', 'pt'), - (u'portugese', 'pt'), - (u'slovene', 'sl'), - (u'spa', 'es'), - (u'spagnolo', 'es'), -]) - -sep = re.compile(r'[ \-;^,/]+') +from oaipmh.metadata import MetadataReader + +from django.conf import settings + +from regluit.core import models +from regluit.utils.lang import lang_to_language_code +from .soup import get_soup + + +logger = logging.getLogger(__name__) + def doab_lang_to_iso_639_1(lang): - if lang is None or not lang: - return "xx" - else: - lang = sep.split(lang)[0] - code = get_language_code(lang) - if code: - return code - else: - return EXTRA_LANG_MAP.get(lang.lower(), 'xx') - - -DOMAIN_TO_PROVIDER = dict([ - [u'antropologie.zcu.cz', u'AntropoWeb'], - [u'books.mdpi.com', u'MDPI Books'], - [u'books.openedition.org', u'OpenEdition Books'], - [u'books.scielo.org', u'SciELO'], - [u'ccdigitalpress.org', u'Computers and Composition Digital Press'], - [u'digitalcommons.usu.edu', u'DigitalCommons, Utah State University'], - [u'dl.dropboxusercontent.com', u'Dropbox'], - [u'dspace.ucalgary.ca', u'Institutional Repository at the University of Calgary'], - [u'dx.doi.org', u'DOI Resolver'], - [u'ebooks.iospress.nl', u'IOS Press Ebooks'], - 
[u'hdl.handle.net', u'Handle Proxy'], - [u'hw.oeaw.ac.at', u'Austrian Academy of Sciences'], - [u'img.mdpi.org', u'MDPI Books'], - [u'ledibooks.com', u'LediBooks'], - [u'leo.cilea.it', u'LEO '], - [u'leo.cineca.it', u'Letteratura Elettronica Online'], - [u'link.springer.com', u'Springer'], - [u'oapen.org', u'OAPEN Library'], - [u'press.openedition.org', u'OpenEdition Press'], - [u'windsor.scholarsportal.info', u'Scholars Portal'], - [u'www.adelaide.edu.au', u'University of Adelaide'], - [u'www.aliprandi.org', u'Simone Aliprandi'], - [u'www.antilia.to.it', u'antilia.to.it'], - [u'www.aupress.ca', u'Athabasca University Press'], - [u'www.bloomsburyacademic.com', u'Bloomsbury Academic'], - [u'www.co-action.net', u'Co-Action Publishing'], - [u'www.degruyter.com', u'De Gruyter Online'], - [u'www.doabooks.org', u'Directory of Open Access Books'], - [u'www.dropbox.com', u'Dropbox'], - [u'www.ebooks.iospress.nl', u'IOS Press Ebooks'], - [u'www.ledizioni.it', u'Ledizioni'], - [u'www.maestrantonella.it', u'maestrantonella.it'], - [u'www.oapen.org', u'OAPEN Library'], - [u'www.openbookpublishers.com', u'Open Book Publishers'], - [u'www.palgraveconnect.com', u'Palgrave Connect'], - [u'www.scribd.com', u'Scribd'], - [u'www.springerlink.com', u'Springer'], - [u'www.ubiquitypress.com', u'Ubiquity Press'], - [u'www.unimib.it', u'University of Milano-Bicocca'], - [u'www.unito.it', u"University of Turin"], -]) - -def url_to_provider(url): - netloc = urlparse.urlparse(url).netloc - return DOMAIN_TO_PROVIDER.get(netloc, netloc) - -FRONTIERSIN = re.compile(r'frontiersin.org/books/[^/]+/(\d+)') + lang = lang_to_language_code(lang) + return lang if lang else 'xx' + + +doab_reader = MetadataReader( + fields={ + 'title': ('textList', 'oai_dc:dc/datacite:title/text()'), + 'creator': ('textList', 'oai_dc:dc/datacite:creator/text()'), + 'subject': ('textList', 'oai_dc:dc/datacite:subject/text()'), + 'description': ('textList', 'oai_dc:dc/dc:description/text()'), + 'publisher': ('textList', 
'oai_dc:dc/dc:publisher/text()'), + 'editor': ('textList', 'oai_dc:dc/datacite:contributor[@type="Editor"]/text()'), + 'date': ('textList', 'oai_dc:dc/datacite:date[@type="Issued"]/text()'), + 'type': ('textList', 'oai_dc:dc/oaire:resourceType/text()'), + 'format': ('textList', 'oai_dc:dc/dc:format/text()'), + 'identifier': ('textList', 'oai_dc:dc/dc:identifier/text()'), + 'source': ('textList', 'oai_dc:dc/dc:source/text()'), + 'language': ('textList', 'oai_dc:dc/dc:language/text()'), + 'relation': ('textList', 'oai_dc:dc/dc:relation/text()'), + 'coverage': ('textList', 'oai_dc:dc/dc:coverage/text()'), + 'rights': ('textList', 'oai_dc:dc/oaire:licenseCondition/@uri'), + 'isbn': ('textList', 'oai_dc:dc/datacite:alternateIdentifier[@type="ISBN"]/text()'), + 'doi': ('textList', 'oai_dc:dc/datacite:alternateIdentifier[@type="DOI"]/text()'), + }, + namespaces={ + 'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/', + 'dc' : 'http://purl.org/dc/elements/1.1/', + 'grantor': 'http://purl.org/dc/elements/1.1/', + 'publisher': 'http://purl.org/dc/elements/1.1/', + 'oapen': 'http://purl.org/dc/elements/1.1/', + 'oaire': 'https://raw.githubusercontent.com/rcic/openaire4/master/schemas/4.0/oaire.xsd', + 'datacite': 'https://schema.datacite.org/meta/kernel-4.1/metadata.xsd', + 'doc': 'http://www.lyncode.com/xoai' + } +) +STOREPROVIDERS = [ + '7switch.com', + 'amazon.ca', + 'amazon.co.uk', + 'amazon.com', + 'amazon.de', + 'amzn.to', + 'apress.com', + 'bloomsbury.com', + 'bod.de', + 'booksdirect.co.za', + 'cabi.org', + 'cdcshoppingcart.uchicago.edu', + 'checkout.sas.ac.uk', + 'duncker-humblot.de', + 'dykinson.com', + 'e-elgar.com', + 'edicions.ub.edu', + 'epubli.de', + 'eurekaselect.com', + 'fondazionecafoscari.storeden.com', + 'global.oup.com', + 'iospress.nl', + 'karolinum.cz', + 'librumstore.com', + 'logos-verlag.de', + 'manchesteruniversitypress.co.uk', + 'mitpress.mit.edu', + 'munishop.muni.cz', + 'nai010.com', + 'nomos-shop.de', + 'palgrave.com', + 
'placedeslibraires.fr', + 'play.google.com', + 'press.umich.edu', + 'pressesuniversitairesdeliege.be', + 'publicacions.ub.edu', + 'publicacions.urv.cat', + 'schueren-verlag.de', + 'sci.fo', + 'store.printservice.nl', + 'una-editions.fr', + 'universitaetsverlag.uni-kiel.de', + 'universitetsforlaget.no', + 'urldefense.com', + 'usu.edu', + 'uwapress.uw.edu', + 'wbg-wissenverbindet.de', + 'zalozba.zrc-sazu.si', +] def online_to_download(url): urls = [] if not url: return urls - if url.find(u'mdpi.com/books/pdfview/book/') >= 0: - doc = get_soup(url) - if doc: - obj = doc.find('object', type='application/pdf') - if obj: - urls.append(obj['data'].split('#')[0]) - elif url.find(u'books.scielo.org/') >= 0: - doc = get_soup(url) - if doc: - obj = doc.find('a', class_='pdf_file') - if obj: - urls.append(urlparse.urljoin(url, obj['href'])) - obj = doc.find('a', class_='epub_file') - if obj: - urls.append(urlparse.urljoin(url, obj['href'])) - elif FRONTIERSIN.search(url): - booknum = FRONTIERSIN.search(url).group(1) - urls.append(u'https://www.frontiersin.org/GetFile.aspx?ebook={}&fileformat=EPUB'.format(booknum)) - urls.append(u'https://www.frontiersin.org/GetFile.aspx?ebook={}&fileformat=PDF'.format(booknum)) + + elif url.find(u'edp-open.org/books-in-') >= 0: + # pages needing multi-scrape + return urls else: urls.append(url) + if not urls: + logging.warning('no valid download urls for %s', url) return urls + +STREAM_QUERY = 'https://directory.doabooks.org/rest/search?query=handle:{}&expand=bitstreams' + +def get_streamdata(handle): + url = STREAM_QUERY.format(handle) + try: + response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + items = response.json() + if items: + for stream in items[0]['bitstreams']: + if stream['bundleName'] == "THUMBNAIL": + stream['handle'] = handle + return stream + else: + logger.error("No items in streamdata for %s", handle) + except requests.exceptions.RequestException as e: + logger.error(e) + except SSLError as e: + 
logger.error(e) + except ValueError as e: + # decoder error + logger.error(e) + +COVER_FSTRING = "https://directory.doabooks.org/bitstream/handle/{handle}/{name}?sequence={sequenceId}&isAllowed=y" +def doab_cover(doab_id): + stream_data = get_streamdata(doab_id) + if not stream_data: + logger.error('get_streamdata failed for %s', doab_id) + return None + if 'retrieveLink' in stream_data: + return f"https://directory.doabooks.org{stream_data['retrieveLink']}" + return COVER_FSTRING.format(**stream_data) + diff --git a/core/loaders/harvest.py b/core/loaders/harvest.py new file mode 100644 index 000000000..00d4abaf4 --- /dev/null +++ b/core/loaders/harvest.py @@ -0,0 +1,1522 @@ +""" +code for harvesting 'online' ebooks +""" +import json +import logging +import re +import time +from urllib.parse import quote, unquote, urljoin, urlparse, urlsplit, urlunsplit + +import requests + +from django.conf import settings +from django.core.files.base import ContentFile +from django.core.files.storage import default_storage + +from regluit.core import models +from regluit.core.models import loader +from regluit.core.parameters import GOOD_PROVIDERS, DOWNLOADABLE +from regluit.core.pdf import staple_pdf + +from .soup import get_soup +from .doab_utils import STOREPROVIDERS + +logger = logging.getLogger(__name__) + +DROPBOX_DL = re.compile(r'"(https://dl.dropboxusercontent.com/content_link/[^"]+)"') +DELAY = 1.0 + +class RateLimiter(object): + def __init__(self): + self.last = {} + + def delay(self, provider): + if provider in self.last: + prev = self.last[provider] + pres = time.time() + if pres - prev < DELAY: + time.sleep(float(DELAY - pres + prev)) + self.last[provider] = time.time() + return + +rl = RateLimiter() + + +def set_bookshop(ebook): + ebook.format = 'bookshop' + ebook.save() + return None, 0 + + +def dl_online(ebook, limiter=rl.delay, format='online', force=False): + if ebook.format != format or (not force and ebook.provider in DONT_HARVEST): + return None, 0 + if 
ebook.provider in STOREPROVIDERS: + return set_bookshop(ebook) + if ebook.ebook_files.exists(): + return ebook.ebook_files.first(), 0 + for do_harvest, harvester in harvesters(ebook): + if do_harvest: + for ebf in ebf_if_harvested(ebook.url): + clean_archive(ebf) + return ebf, 0 + limiter(ebook.provider) + return harvester(ebook) + return None, 0 + + +def archive_dl(ebook, limiter=rl.delay, force=False): + """ status codes + 0 : archive exists + 1 : archive made + -1 : urls does not return an ebook file + """ + status = -1 + ebf = None + if ebook.ebook_files.filter(asking=False).exists(): + status = 0 + elif models.EbookFile.objects.filter(source=ebook.url, format=ebook.format).exists(): + status = 0 + else: + dl_cf, fmt = loader.load_ebookfile(ebook.url, ebook.format) + if dl_cf: + ebf, num = make_harvested_ebook(dl_cf, ebook, fmt, filesize=dl_cf.size) + clean_archive(ebf) + status = 1 + else: + logger.warning('download format %s for %s is not ebook', ebook.format, ebook.url) + limiter(ebook.provider) + if not ebf: + status = -1 + return status + + +def clean_archive(ebf): + fsize = ebf.ebook.filesize + ebook = ebf.ebook + if not fsize or ebf.asking == 1 or ebook.format not in DOWNLOADABLE or not ebook.active: + return + # find duplicate files by looking at filesize + old_ebooks = models.Ebook.objects.filter(filesize=fsize, provider=ebf.ebook.provider, + edition__work=ebf.edition.work, format=ebf.format + ).exclude(id=ebf.ebook.id) + for old_ebook in old_ebooks: + old_ebook.active = False + for oldebf in old_ebook.ebook_files.exclude(id=ebf.id): + if oldebf.file != ebf.file: + # save storage by deleting redundant files + oldebf.file.delete() + oldebf.file = ebf.file + oldebf.save() + old_ebook.save() + + +CMPPROVIDERS = [ + 'books.open.tudelft.nl', + 'ebooks.epublishing.ekt.gr', + 'ebooks.marilia.unesp.br', + 'ebooks.uminho.pt', + 'editorial.inudi.edu.pe', + 'editorial.ucatolicaluisamigo.edu.co', + 'editorial.uniagustiniana.edu.co', + 'editorialgrupo-aea.com', + 
'fcjp.derecho.unap.edu.pe', + 'fedoabooks.unina.it', + 'humanities-digital-library.org', + 'idicap.com', + 'libri.unimi.it', + 'libros.unad.edu.co', + 'libros.usc.edu.co', + 'llibres.urv.cat', + 'monografias.editorial.upv.es', + 'monograph.com.ua', + 'monographs.uc.pt', + 'omp.ub.rub.de', + 'openuctpress.uct.ac.za', + 'omp.zrc-sazu.si', + 'openpress.mtsu.edu', + 'omp.ub.rub.de', + 'penerbit.brin.go.id', + 'press.uni.lodz.pl', + 'redliclibros.com', + 'Scholars Portal', + 'teiresias-supplements.mcgill.ca', + 'textbooks.open.tudelft.nl', + 'unicapress.unica.it', +] + +DSPACEPROVIDERS = [ + 'acikerisim.kapadokya.edu.tr', + 'diposit.ub.edu', + 'orbi.ulg.ac.be', + 'orbi.uliege.be', + 'publikationen.uni-tuebingen.de', + '', +] + +DONT_HARVEST = [ + 'Unglue.it', + 'Github', + 'Project Gutenberg', + 'Google Books', + 'OpenEdition Books', +] +MANUAL_HARVEST = [ + 'cabidigitallibrary.org', + 'books.google.be', + 'books.google.ch', + 'books.google.nl', +] + + +def harvesters(ebook): + yield ebook.provider == 'OAPEN Library', harvest_oapen + yield ebook.provider == 'SciELO', harvest_scielo + yield ebook.provider in GOOD_PROVIDERS, harvest_generic + yield ebook.provider in MANUAL_HARVEST, harvest_manual + yield 'dropbox.com/s/' in ebook.url, harvest_dropbox + yield ebook.provider == 'jbe-platform.com', harvest_jbe + yield ebook.provider == u'De Gruyter Online', harvest_degruyter + yield ebook.provider == 'Open Book Publishers', harvest_obp + yield ebook.provider == 'Transcript-Verlag', harvest_transcript + yield ebook.provider == 'shop.budrich.de', harvest_budrich + yield ebook.provider == 'ksp.kit.edu', harvest_ksp + yield ebook.provider in ['repositorio.americana.edu.co'], harvest_dspace2 + yield ebook.provider == 'nomos-elibrary.de', harvest_nomos + yield ebook.provider == 'digitalis.uc.pt', harvest_digitalis + yield 'frontiersin.org' in ebook.provider, harvest_frontiersin + yield ebook.provider in ['Palgrave Connect', 'Springer', 'springer.com'], harvest_springerlink + yield 
ebook.provider == 'pulp.up.ac.za', harvest_pulp + yield ebook.provider == 'bloomsburycollections.com', harvest_bloomsbury + yield ebook.provider == 'Athabasca University Press', harvest_athabasca + yield 'digitalcommons.usu.edu' in ebook.url, harvest_usu + yield ebook.provider == 'libros.fahce.unlp.edu.ar', harvest_fahce + yield ebook.provider in ['digital.library.unt.edu', 'texashistory.unt.edu'], harvest_unt + yield ebook.provider in DSPACEPROVIDERS, harvest_dspace + yield ebook.provider == 'e-publish.uliege.be', harvest_liege + yield ebook.provider in CMPPROVIDERS, harvest_cmp + yield 'mdpi' in ebook.provider.lower(), harvest_mdpi + yield ebook.provider == 'idunn.no', harvest_idunn + yield ebook.provider == 'press.ucalgary.ca', harvest_calgary + yield ebook.provider in ['Ledizioni', 'bibsciences.org', + 'heiup.uni-heidelberg.de', 'e-archivo.uc3m.es'], harvest_generic + yield ebook.provider in ['funlam.edu.co'], harvest_generic_chrome + yield ebook.provider == 'muse.jhu.edu', harvest_muse + yield ebook.provider == 'direct.mit.edu', harvest_mitpress + yield ebook.provider == 'IOS Press Ebooks', harvest_ios + yield ebook.provider == 'elgaronline.com', harvest_elgar + yield ebook.provider == 'worldscientific.com', harvest_wsp + yield ebook.provider in ['edition-open-access.de', 'edition-open-sources.org'], harvest_mprl + yield ebook.provider == 'rti.org', harvest_rti + yield ebook.provider == 'edoc.unibas.ch', harvest_unibas + yield 'pensoft' in ebook.provider, harvest_pensoft + yield ebook.provider == 'edp-open.org', harvest_edp + yield ebook.provider == 'laboutique.edpsciences.fr', harvest_edpsciences + yield ebook.provider == 'waxmann.com', harvest_waxmann + yield ebook.provider == 'pbsociety.org.pl', harvest_ojs + yield 'sciendo.com' in ebook.provider, harvest_sciendo + yield ebook.provider == 'edition-topoi.org', harvest_topoi + yield ebook.provider == 'meson.press', harvest_meson + yield 'brill' in ebook.provider, harvest_brill + yield ebook.provider == 'DOI 
Resolver', harvest_doi + yield ebook.provider in ['apps.crossref.org', 'mr.crossref.org'], harvest_doi_coaccess + yield ebook.provider == 'ispf-lab.cnr.it', harvest_ipsflab + yield ebook.provider == 'libros.uchile.cl', harvest_libroschile + yield ebook.provider == 'smithsonian.figshare.com', harvest_figshare + yield ebook.provider == 'fupress.com', harvest_fupress + yield ebook.provider == 'funlam.edu.co', harvest_funlam + yield ebook.provider == 'elibrary.duncker-humblot.com', harvest_dunckerhumblot + yield ebook.provider == 'cornellopen.org', harvest_cornellopen + yield ebook.provider == 'esv.info', harvest_esv + yield ebook.provider == 'fulcrum.org', harvest_fulcrum + yield ebook.provider in ('epress.lib.uts.edu.au', 'utsepress.lib.uts.edu.au'), harvest_ubiquity + yield ebook.provider == 'orkana.no', harvest_orkana + yield ebook.provider == 'euna.una.ac.cr', harvest_euna + yield ebook.provider == 'openresearchlibrary.org', harvest_orl + yield ebook.provider == 'pressesagro.be', harvest_pressesagro + yield ebook.provider == 'buponline.com', harvest_buponline + yield ebook.provider == 'intechopen.com', harvest_intech + yield ebook.provider == 'usmcu.edu', harvest_usmcu + yield ebook.provider == 'lalibreria.upv.es', harvest_upv + yield ebook.provider == 'cambridge.org', harvest_cambridge + yield ebook.provider == 'exonpublications.com', harvest_exon + yield ebook.provider == 'ressources.una-editions.fr', harvest_una + yield ebook.provider == 'wbg-wissenverbindet.de', harvest_wbg + yield ebook.provider == 'urn.kb.se', harvest_kb + yield ebook.provider == 'publikationen.bibliothek.kit.edu', harvest_kit + yield ebook.provider == 'iupress.istanbul.edu.tr', harvest_istanbul + yield ebook.provider == 'editorialbonaventuriana.usb.edu.co', harvest_editorialbonaventuriana + yield ebook.provider == 'verlag.gta.arch.ethz.ch', harvest_gta + yield ebook.provider == 'manchesteruniversitypress.co.uk', harvest_manu + yield ebook.provider == 'tectum-elibrary.de', harvest_tecnum + 
yield ebook.provider == 'benjamins.com', harvest_benjamins + yield ebook.provider == 'macau.uni-kiel.de', harvest_citation_meta_generic + + +def ebf_if_harvested(url): + onlines = models.EbookFile.objects.filter(source=url) + if onlines.exists(): + return onlines + return models.EbookFile.objects.none() + + +def make_dl_ebook(url, ebook, user_agent=settings.USER_AGENT, method='GET', verify=True): + if not url: + logger.warning('no url for ebook %s', ebook.id) + return None, 0 + logger.info('making %s' % url) + + # check to see if url already harvested + for ebf in ebf_if_harvested(url): + # these ebookfiles are created to short-circuit dl_online to avoid re-harvest + if ebf.ebook == ebook: + return ebf, 0 + new_ebf = models.EbookFile.objects.create( + edition=ebf.edition, + format=ebf.format, + file=ebf.file, + source=ebook.url, + ebook=ebook, + ) + logger.info("Previously harvested") + return new_ebf, 0 + + dl_cf, fmt = loader.load_ebookfile(url, ebook.format, + user_agent=user_agent, method=method, verify=verify) + if dl_cf: + return make_harvested_ebook(dl_cf, ebook, fmt, filesize=dl_cf.size) + else: + logger.warning('download format %s for %s is not ebook', ebook.format, url) + return None, 0 + + +def redirect_ebook(ebook, verify=True): + """ returns an ebook and status : + -3 : bad return code or problem + -1 : deleted + -2 : dead, but we need to keep items + 0 : replaced with existing + 1 : url updated + + """ + try: + r = requests.head(ebook.url, allow_redirects=True, verify=verify) + except requests.exceptions.ConnectionError as e: + logger.error("Connection refused for %s", ebook.url) + logger.error(e) + return ebook, -3 + + if r.status_code == 404: + if not models.Ebook.ebook_files.exists(): + logger.info('deleting ebook for dead url', ebook.url) + ebook.delete() + return None, -1 + return ebook, -2 + elif r.status_code == 200: + if ebook.url != r.url: + if models.Ebook.objects.exclude(id=ebook.id).filter(url=r.url).exists(): + existing = 
models.Ebook.objects.filter(url=r.url)[0] + logger.error(f'ebook {ebook.id} redirects to existing {existing.id}') + ebook.format='redirect' + ebook.save() + return existing, 0 + ebook.url = r.url + ebook.set_provider() + ebook.save() + return ebook, 1 + return ebook, 0 + + logger.error("status code %s for %s", r.status_code, ebook.url) + return ebook, -3 + + +def make_stapled_ebook(urllist, ebook, user_agent=settings.USER_AGENT, strip_covers=False): + pdffile = staple_pdf(urllist, user_agent, strip_covers=strip_covers) + if not pdffile: + return None, 0 + return make_harvested_ebook(ContentFile(pdffile.getvalue()), ebook, 'pdf') + + +def make_harvested_ebook(content, ebook, format, filesize=0): + if not filesize: + filesize = len(content) + new_ebf = models.EbookFile.objects.create( + edition=ebook.edition, + format=format, + source=ebook.url, + ) + try: + new_ebf.file.save(models.path_for_file(new_ebf, None), content) + new_ebf.save() + except MemoryError: #huge pdf files cause problems here + logger.error("memory error saving ebook file for %s", ebook.url) + new_ebf.delete() + return None, 0 + if ebook.format == "online": + harvested_ebook = models.Ebook.objects.create( + edition=ebook.edition, + format=format, + provider='Unglue.it', + url=new_ebf.file.url, + rights=ebook.rights, + filesize=filesize if filesize < 2147483647 else 2147483647, # largest safe integer + version_label=ebook.version_label, + version_iter=ebook.version_iter, + ) + else: + if not ebook.filesize: + ebook.filesize = filesize if filesize < 2147483647 else 2147483647 + ebook.save() + harvested_ebook = ebook + + new_ebf.ebook = harvested_ebook + new_ebf.save() + return new_ebf, 1 + + +def is_bookshop_url(url): + if '/prodotto/' in url: + return True + if ':' in url and url.split(':')[1].startswith('//library.oapen.org/handle/'): + return True + return False + + +def harvest_generic(ebook, user_agent=settings.USER_AGENT): + if is_bookshop_url(ebook.url): + return set_bookshop(ebook) + return 
make_dl_ebook(ebook.url, ebook, user_agent=user_agent) + +def harvest_generic_chrome(ebook, ): + return make_dl_ebook(ebook.url, ebook, user_agent=settings.CHROME_UA) + + +def harvest_manual(ebook): + def make_manual_ebf(format): + fname = f'mebf/{ebook.id}.{format}' + if default_storage.exists(fname): + filesize = default_storage.size(fname) + new_ebf = models.EbookFile.objects.create( + edition=ebook.edition, + format=format, + source=ebook.url, + ) + new_ebf.file.name = fname + harvested_ebook = models.Ebook.objects.create( + edition=ebook.edition, + format=format, + provider='Unglue.it', + url=new_ebf.file.url, + rights=ebook.rights, + filesize=filesize, + version_label=ebook.version_label, + version_iter=ebook.version_iter, + ) + new_ebf.ebook = harvested_ebook + new_ebf.save() + return new_ebf + else: + return None + pdf_ebf = make_manual_ebf('pdf') + epub_ebf = make_manual_ebf('epub') + + return pdf_ebf or epub_ebf, (1 if pdf_ebf else 0) + (1 if epub_ebf else 0) + + +def harvest_oapen(ebook): + if is_bookshop_url(ebook.url): + return set_bookshop(ebook) + if '/bitstream/' in ebook.url: + return make_dl_ebook(ebook.url, ebook, user_agent=settings.USER_AGENT) + return None, 0 + + +def harvest_one_generic(ebook, selector, user_agent=settings.USER_AGENT): + doc = get_soup(ebook.url, user_agent=user_agent, follow_redirects=True) + if doc: + try: + base = doc.find('base')['href'] + except: + base = ebook.url + obj = selector(doc) + if obj: + dl_url = urljoin(base, obj['href']) + harvest = make_dl_ebook(dl_url, ebook, user_agent=user_agent) + if not harvest[0]: + logger.warning('couldn\'t harvest %s', dl_url) + return harvest + else: + logger.warning('couldn\'t get dl_url for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_multiple_generic(ebook, selector, dl=lambda x:x, + user_agent=settings.USER_AGENT, verify=True): + num = 0 + harvested = None + doc = get_soup(ebook.url, follow_redirects=True, 
user_agent=user_agent, verify=verify) + if doc: + found = [] + try: + base = doc.find('base')['href'] + except: + base = ebook.url + for obj in selector(doc): + dl_url = dl(urljoin(base, obj.get('href'))) + logger.info(dl_url) + if dl_url in found: + continue + else: + found.append(dl_url) + harvested, made = make_dl_ebook(dl_url, ebook, verify=verify) + num += made + if num == 0: + logger.warning('couldn\'t get any dl_url for %s', ebook.url) + return harvested, num + + +def harvest_stapled_generic(ebook, selector, chap_selector, strip_covers=0, + user_agent=settings.GOOGLEBOT_UA, dl=lambda x:x): + doc = get_soup(ebook.url, user_agent=user_agent, follow_redirects=True) + if doc: + try: + base = doc.find('base')['href'] + except: + base = ebook.url + made = None + + # check for complete ebook + if selector: + obj = selector(doc) + if obj: + dl_url = dl(urljoin(base, obj['href'])) + made = make_dl_ebook(dl_url, ebook) + if made: + return made + + # staple the chapters + pdflinks = [dl(urljoin(base, a['href'])) for a in chap_selector(doc)] + stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook, user_agent=user_agent, + strip_covers=strip_covers) + if stapled: + return stapled + + logger.warning('couldn\'t make ebook file for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +OPENBOOKPUB = re.compile(r'openbookpublishers.com/+(reader|product|/?download/book|books)/(10\.11647/OBP\.\d+|\d+)') + +def harvest_obp(ebook): + match = OPENBOOKPUB.search(ebook.url) + booknum = None + if not match: + return None, 0 + if match and match.group(1) in ('product', 'reader'): + prodnum = match.group(2) + prod_url = 'https://www.openbookpublishers.com/product/{}'.format(prodnum) + doc = get_soup(prod_url, settings.GOOGLEBOT_UA) + if doc: + obj = doc.find('button', value='Download') + if obj: + booknum = obj.get('onclick') + if booknum: + booknum = OPENBOOKPUB.search(booknum).group(2) + else: + 
logger.warning('couldn\'t get soup for %s', prod_url) + elif match and match.group(2).startswith('10.'): + dl_url = 'https://books.openbookpublishers.com/' + match.group(2).lower() + '.pdf' + return make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + else: + booknum = match.group(2) + if not booknum: + logger.warning('couldn\'t get booknum for %s', ebook.url) + return None, 0 + dl_url = 'https://www.openbookpublishers.com//download/book_content/{}'.format(booknum) + made = make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA, method='POST') + return made + + +DEGRUYTERFULL = re.compile(r'/downloadpdf/title/.*') +DEGRUYTERCHAP = re.compile(r'/downloadpdf/book/.*') +COMPLETE = re.compile(r'complete ebook', flags=re.I) +DOWNLOAD = re.compile(r' *download *', flags=re.I) + +def harvest_degruyter(ebook): + ebook, status = redirect_ebook(ebook) + if status < 1: + return None, -1 if status < 0 else 0 + doc = get_soup(ebook.url, settings.GOOGLEBOT_UA) + if doc: + try: + base = doc.find('base')['href'] + except: + base = ebook.url + made = 0 + harvested = None + + # check for epub + obj = doc.select_one('a.ga_download_dropdown_epub_book') + if obj: + dl_url = urljoin(base, obj['href']) + harvested, made = make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + + # check for pdf + obj = doc.select_one('a.downloadCompletePdfBook') + if obj: + dl_url = urljoin(base, obj['href']) + harvested, madepdf = make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + made = made + madepdf + if made: + return harvested, made + + # none yet, check for complete ebook + obj = doc.find('a', string=COMPLETE) + if obj: + obj = obj.parent.parent.parent.select_one('a.pdf-link') + else: + obj = doc.find('a', href=DEGRUYTERFULL) + if obj: + dl_url = urljoin(base, obj['href']) + return make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + + # staple the chapters + pdflinks = [urljoin(base, a['href']) for a in doc.find_all('a', href=DEGRUYTERCHAP)] + 
stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook, user_agent=settings.GOOGLEBOT_UA) + if stapled: + return stapled + logger.warning('couldn\'t get dl_url for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_dropbox(ebook): + if ebook.url.find(u'dl=0') >= 0: + dl_url = ebook.url.replace(u'dl=0', u'dl=1') + return make_dl_ebook(dl_url, ebook) + elif ebook.url.find(u'?') < 0: + dl_url = ebook.url + u'?dl=1' + return make_dl_ebook(dl_url, ebook) + response = requests.get(ebook.url, headers={"User-Agent": settings.USER_AGENT}) + if response.status_code == 200: + match_dl = DROPBOX_DL.search(response.content) + if match_dl: + return make_dl_ebook(match_dl.group(1), ebook) + else: + logger.warning('couldn\'t get %s', ebook.url) + else: + logger.warning('couldn\'t get dl for %s', ebook.url) + return None, 0 + + +def harvest_jbe(ebook): + def selector(doc): + return doc.select('div.access-options a[href]') + return harvest_multiple_generic(ebook, selector) + + +def harvest_transcript(ebook): + num = 0 + harvested = None + doc = get_soup(ebook.url) + if doc: + objs = doc.select('a.content--link') + for obj in objs: + dl_url = urljoin(ebook.url, obj['href']) + if dl_url.endswith('.pdf') or dl_url.endswith('.epub'): + harvested, made = make_dl_ebook(dl_url, ebook) + num += made + if not harvested: + logger.warning('couldn\'t get any dl_url for %s', ebook.url) + return harvested, num + + +def harvest_ksp(ebook): + def selector(doc): + return doc.select_one('p.linkForPDF a') + return harvest_one_generic(ebook, selector) + + +def harvest_digitalis(ebook): + def selector(doc): + return doc.select_one('a.item-download-button') + return harvest_one_generic(ebook, selector) + + +def harvest_kit(ebook): + def selector(doc): + return doc.select_one('a.downloadTextLink') + return harvest_one_generic(ebook, selector) + + +def harvest_budrich(ebook): + def selector(doc): + return 
doc.select_one('a.download_pdf') + return harvest_one_generic(ebook, selector) + + +NOMOSPDF = re.compile('download_full_pdf') +def harvest_nomos(ebook): + doc = get_soup(ebook.url, follow_redirects=True) + try: + base = doc.find('base')['href'] + except: + base = ebook.url + + if doc: + obj = doc.find('a', href=NOMOSPDF) + if obj: + dl_url = urljoin(base, obj['href']) + return make_dl_ebook(dl_url, ebook) + else: + logger.warning('will try stapling a book for %s', ebook.url) + + # staple the chapters + chaps = doc.select('li.access[data-doi]') + + pdflinks = [] + for chap in chaps: + link = urljoin( + 'https://www.nomos-elibrary.de', + chap['data-doi'] + '.pdf?download_full_pdf=1' + ) + if link not in pdflinks: + pdflinks.append(link) + stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook, user_agent=settings.GOOGLEBOT_UA) + if stapled: + return stapled + else: + logger.warning('couldn\'t staple ebook %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_frontiersin(ebook): + if 'GetFile.aspx' in ebook.url: + ebook.delete() + rl.last.pop(ebook.provider, 0) + return None, 0 + + num = 0 + harvested = None + doc = get_soup(ebook.url, follow_redirects=True) + if doc: + for obj in doc.select('button[data-href]'): + dl_url = obj['data-href'] + harvested, made = make_dl_ebook( + dl_url, + ebook, + user_agent=requests.utils.default_user_agent(), + ) + num += made + if num == 0: + logger.warning('couldn\'t get any dl_url for %s', ebook.url) + return harvested, num + + +def harvest_scielo(ebook): + def selector(doc): + return doc.select('a.pdf_file,a.epub_file') + if ebook.url.startswith('http;'): + ebook, status = redirect_ebook(ebook) + if status < 0: + return None, 0 + return harvest_multiple_generic(ebook, selector) + + +def harvest_springerlink(ebook): + def selector(doc): + return doc.select('a[data-book-epub],a[data-book-pdf]') + return harvest_multiple_generic(ebook, selector, 
user_agent=settings.CHROME_UA) + + +EDOCMAN = re.compile('component/edocman/') +def harvest_pulp(ebook): + def edocman(url): + if not EDOCMAN.search(url): + return + return url + '/download?Itemid=' + dl_url = edocman(ebook.url) + if dl_url: + return make_dl_ebook(dl_url, ebook, user_agent=requests.utils.default_user_agent()) + doc = get_soup(ebook.url) + harvested = None + made = 0 + if doc: + obj = doc.find('a', href=EDOCMAN) + if obj: + dl_url = edocman(urljoin(ebook.url, obj['href'])) + harvested, made = make_dl_ebook(dl_url, ebook, + user_agent=requests.utils.default_user_agent()) + if made == 0: + logger.warning('couldn\'t get any dl_url for %s or %s', ebook.url, dl_url) + return harvested, made + + +def harvest_bloomsbury(ebook): + doc = get_soup(ebook.url, follow_redirects=True) + if doc: + pdflinks = [] + try: + base = doc.find('base')['href'] + except: + base = ebook.url + for obj in doc.select('li.pdf-chapter--title a[href]'): + if obj: + chap = urljoin(base, obj['href']) + '.pdf?dl' + pdflinks.append(chap) + stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook, strip_covers=True) + if stapled: + return stapled + else: + logger.warning('couldn\'t staple %s', pdflinks) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_athabasca(ebook): + def selector(doc): + return doc.select_one('li.downloadPDF a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_usu(ebook): + def selector(doc): + return doc.select_one('#full-text a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_fahce(ebook): + def selector(doc): + return doc.select_one('div.pub_format_single a[href]') + return harvest_one_generic(ebook, selector) + + +def get_meta(doc, term): + obj = doc.find('meta', attrs={"name": term}) + if obj: + return obj.get('content', None) + else: + logger.warning(f'no meta for {term}') + + +BAD_CERTS = { + 'ebooks.marilia.unesp.br', + 
'editorial.ucatolicaluisamigo.edu.co', + 'libri.unimi.it', + 'monografias.editorial.upv.es', + 'openpress.mtsu.edu', +} + +def harvest_cmp(ebook): + def selector(doc): + citation_pdf_url = get_meta(doc, "citation_pdf_url") + citation_epub_url = get_meta(doc, "citation_epub_url") + if citation_pdf_url or citation_epub_url: + if citation_pdf_url: + yield {'href': citation_pdf_url} + if citation_epub_url: + yield {'href': citation_epub_url} + else: + found = False + for obj in doc.select('div.entry_details a.cmp_download_link[href]'): + found = True + yield obj + + if not found: + objs = doc.select('.chapters a.cmp_download_link[href], .files a.cmp_download_link[href]') + if (len({obj['href'] for obj in objs})) > 1: + return [] + return doc.select('a.cmp_download_link[href]') + + def dl(url): + return url.replace('view', 'download') + '?inline=1' + + verify = ebook.provider not in BAD_CERTS + if '/view/' in ebook.url: + (ebf, num) = make_dl_ebook(dl(ebook.url), ebook, verify=verify) + if num > 0: + return (ebf, num) + return harvest_multiple_generic(ebook, selector, dl=dl, verify=verify) + + +DSPACEPDF = re.compile(r'/bitstream/.*\.(pdf|epub)') +def harvest_dspace(ebook): + def selector(doc): + return doc.find_all(href=DSPACEPDF) + return harvest_multiple_generic(ebook, selector) + + +def harvest_dspace2(ebook): + doc = get_soup(ebook.url) + if doc: + citation_pdf_url = get_meta(doc, "citation_pdf_url") + if citation_pdf_url: + dl_url = urljoin(ebook.url, citation_pdf_url) + dl_url = dl_url.replace('http://', 'https://') + return make_dl_ebook(dl_url, ebook) + else: + logger.warning('couldn\'t get dl_url for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +# won't harvest page-image books +def harvest_unt(ebook): + def selector(doc): + return doc.select_one('#link-pdf-version[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_mdpi(ebook): + def selector(doc): + return 
doc.select_one('div.main-download-container a[alt=download]') + if 'http://books.mdpi.com' in ebook.url: + ebook.delete() + return None, 0 + elif 'img.mdpi.org' in ebook.url: + return harvest_generic(ebook) + elif re.search(r'mdpi\.com/books/pdfview/(book|topic)/', ebook.url): + return harvest_citation_meta_generic(ebook) + return harvest_one_generic(ebook, selector) + + +def harvest_idunn(ebook): + if '/doi/book/' in ebook.url: + return harvest_manual(ebook) + + doc = get_soup(ebook.url) + if doc: + obj = doc.select_one('#accessinfo[data-product-id]') + if obj: + pdf_id = obj.get('data-pdf-id', '') + prod_id = obj.get('data-product-id', '') + filename = obj.get('data-issue-pdf-url', ebook.url[:21]) + dl_url = 'https://www.idunn.no/file/pdf/%s/%s.pdf' % (pdf_id, filename) + ebf, num = make_dl_ebook(dl_url, ebook) + if ebf: + return ebf, num + dl_url = 'https://www.idunn.no/file/pdf/%s/%s.pdf' % (prod_id, filename) + return make_dl_ebook(dl_url, ebook) + return None, 0 + +# some failures are caused by +def harvest_calgary(ebook): + def selector(doc): + # some failures are caused by a fulltext link that points to another html page + return doc.find('a', string=re.compile('Full Text')) + def chap_selector(doc): + return doc.find_all('a', href=re.compile('/bitstream/.+\.pdf')) + return harvest_stapled_generic(ebook, selector, chap_selector, + user_agent=settings.CHROME_UA, strip_covers=2) + + +def harvest_muse(ebook): + def selector(doc): + return doc.select('a.btn_download_full[href]') + def chap_selector(doc): + return doc.find_all('a', href=re.compile(r'/chapter/\d+/pdf')) + harvested, made = harvest_multiple_generic(ebook, selector) + if harvested: + return harvested, made + return harvest_stapled_generic(ebook, None, chap_selector, strip_covers=1) + + +def harvest_mitpress(ebook): + def selector(doc): + return doc.select('a.book-pdfLink[href]') + def chap_selector(doc): + return doc.select('a.section-pdfLink[href]') + return harvest_stapled_generic(ebook, 
selector, chap_selector, strip_covers=0) + + +def harvest_ios(ebook): + booknum = None + doc = get_soup(ebook.url) + if doc: + obj = doc.find('link', rel='image_src', href=True) + if obj: + booknum = obj['href'].replace('http://ebooks.iospress.nl/Cover/', '') + if booknum: + dl_url = 'http://ebooks.iospress.nl/Download/Pdf?id=%s' % booknum + return make_dl_ebook(dl_url, ebook, method='POST') + else: + logger.warning('couldn\'t get booknum for %s', ebook.url) + else: + logger.warning('couldn\'t get link for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_elgar(ebook): + if 'display' in ebook.url: + url = ebook.url.replace('display', 'downloadpdf')[:-3] + 'pdf' + elif 'monobook-oa' in ebook.url: + url = ebook.url.replace('monobook-oa', 'downloadpdf')[:-3] + 'pdf' + elif 'edcollbook-oa' in ebook.url: + url = ebook.url.replace('edcollbook-oa', 'downloadpdf')[:-3] + 'pdf' + else: + return None, 0 + return make_dl_ebook(url, ebook, user_agent=settings.GOOGLEBOT_UA) + + +def harvest_wsp(ebook): + idmatch = re.search(r'1142/(\d+)', ebook.url) + if idmatch: + url = 'https://www.worldscientific.com/doi/pdf/10.1142/%s?download=true' % idmatch.group(1) + return make_dl_ebook(url, ebook, user_agent=settings.CHROME_UA) + return None, 0 + + +def harvest_mprl(ebook): + def selector(doc): + return doc.select('a.ml-20[href]') + return harvest_multiple_generic(ebook, selector) + + +def harvest_rti(ebook): + def selector(doc): + return doc.find('a', href=re.compile('fulltext.pdf')) + return harvest_one_generic(ebook, selector) + + +def harvest_unibas(ebook): + def selector(doc): + return doc.select_one('a.ep_document_link[href]') + return harvest_one_generic(ebook, selector) + + +PENSOFT = re.compile(r'/book/(\d+)/list/') +def harvest_pensoft(ebook): + if ebook.id == 263395: + book_id = '12847' + elif ebook.url.startswith('https://books.pensoft.net/books/'): + book_id = ebook.url[32:] + elif PENSOFT.search(ebook.url): 
+ book_id = PENSOFT.search(ebook.url).group(1) + else: + return None, 0 + r = requests.get('https://books.pensoft.net/api/books/' + book_id) + if r.status_code == 200: + try: + file_id = r.json()['data']['item_files'][0]['id'] + return make_dl_ebook('https://books.pensoft.net/api/item_files/%s' % file_id, ebook) + except IndexError: + logger.error('no item_file for %s', ebook.url) + return None, 0 + + +def harvest_edp(ebook): + def selector(doc): + return doc.select_one('a.book-dl[href]') + if ebook.url.endswith('.pdf'): + return harvest_generic(ebook, user_agent=settings.CHROME_UA) + return harvest_one_generic(ebook, selector, user_agent=settings.CHROME_UA) + + +def harvest_edpsciences(ebook): + def selector(doc): + return doc.select_one('.article-open-access-download-cell a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_waxmann(ebook): + ebook.url = ebook.url.replace('http://', 'https://') + if ebook.url.startswith('https://www.waxmann.com/buch'): + return make_dl_ebook(ebook.url.replace('buch', 'index.php?eID=download&buchnr='), ebook) + return None, 0 + + +def harvest_tecnum(ebook): + if ebook.url.startswith('https://doi.org/10.5771/'): + url = 'https://www.tectum-elibrary.de/10.5771/' + ebook.url[24:] + '-I.pdf' + return make_dl_ebook(url, ebook) + return None, 0 + + +def harvest_ojs(ebook): + def selector(doc): + return doc.select('#articleFullText a[href]') + def dl(url): + return url.replace('view', 'download') + '?inline=1' + return harvest_multiple_generic(ebook, selector, dl=dl) + + +def harvest_topoi(ebook): + def selector(doc): + return doc.select_one('li.pdf a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_meson(ebook): + def selector(doc): + for btn in doc.select('a[href] btn.btn-openaccess'): + yield btn.parent + return harvest_multiple_generic(ebook, selector) + + +def harvest_brill(ebook): + r = requests.get(ebook.url, headers={'User-Agent': settings.GOOGLEBOT_UA}) + if 
r.url.startswith('https://brill.com/view/title/'): + dl_url = 'https://brill.com/downloadpdf/title/%s.pdf' % r.url[29:] + return make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + elif r.url.startswith('https://brill.com/display/title/'): + dl_url = 'https://brill.com/downloadpdf/title/%s.pdf' % r.url[32:] + return make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + elif r.url.startswith('https://brill.com/edcollbook-oa/title/'): + dl_url = 'https://brill.com/downloadpdf/title/%s.pdf' % r.url[38:] + return make_dl_ebook(dl_url, ebook, user_agent=settings.GOOGLEBOT_UA) + return None, 0 + + +def harvest_doi(ebook): + # usually a 404. + ebook, status = redirect_ebook(ebook) + if status == -2: + return None, -1 + return None, 0 + + +def harvest_doi_coaccess(ebook): + # make a new ebook for the "main pub" and ignore the "related pub" + if ebook.url.startswith('https://doi.org/'): + api_url = 'https://apps.crossref.org/search/coaccess?doi=%s' % quote( + ebook.url[16:], safe='') + r = requests.get(api_url) + if r.status_code == 200: + data = r.json() + url = data.get('url', '') + if not url: + return None, 0 + if models.Ebook.objects.exclude(id=ebook.id).filter(url=url).exists(): + # already taken care of + return set_bookshop(ebook) + + # a new ebook + format = loader.type_for_url(url) + if format in ('pdf', 'epub', 'html', 'online'): + new_ebook = models.Ebook() + new_ebook.format = format + new_ebook.url = url + new_ebook.rights = ebook.rights + new_ebook.edition = ebook.edition + new_ebook.set_provider() + if format == "online": + new_ebook.active = False + new_ebook.save() + set_bookshop(ebook) + if format in DOWNLOADABLE: + return make_dl_ebook(url, ebook) + return None, 0 + + +GUID = re.compile(r'FBInit\.GUID = \"([0-9a-z]+)\"') +LIBROSID = re.compile(r'(\d+)$') +LIBROSROOT = 'https://libros.uchile.cl/files/presses/1/monographs/%s/submission/proof/' +LIBROSINDEX = LIBROSROOT + 'index.html' +LIBROSJSON = LIBROSROOT + 
'files/assets/html/workspace.js?uni=%s' +LIBRODPDF = LIBROSROOT + 'files/assets/common/downloads/%s?uni=%s' + +def harvest_libroschile(ebook): + booknum = LIBROSID.search(ebook.url).group(1) + if not booknum: + return None, 0 + viewurl = LIBROSINDEX % booknum + doc = get_soup(viewurl) + if not doc: + return None, 0 + hit = doc.find(string=GUID) + if not hit: + return None, 0 + guid = GUID.search(hit) + if not guid: + return None, 0 + jsonurl = LIBROSJSON % (booknum, guid) + try: + json = requests.get(jsonurl).json() + except: + return None, 0 + if not json: + return None, 0 + filename = json.get('downloads',{}).get('url', None) + if not filename: + return None, 0 + pdfurl = LIBRODPDF % (booknum, filename, guid) + return make_dl_ebook(pdfurl, ebook) + + +def harvest_ipsflab(ebook): + def selector(doc): + return doc.find_all('a', href=re.compile(r'/system/files/ispf_lab/quaderni/.*\.(pdf|epub)')) + return harvest_multiple_generic(ebook, selector) + + +def harvest_figshare(ebook): + def selector(doc): + return doc.find('a', href=re.compile(r'/ndownloader/')) + return harvest_one_generic(ebook, selector) + + +def harvest_fupress(ebook): + def selector(doc): + return doc.select_one('#ctl00_contenuto_pdf a.btn-open[href]') + if 'isbn' in ebook.url: + set_bookshop(ebook) + return None, 0 + return harvest_one_generic(ebook, selector) + + +def harvest_funlam(ebook): + if '/modules/' in ebook.url: + set_bookshop(ebook) + return None, 0 + return make_dl_ebook(ebook.url, ebook) + + +def harvest_dunckerhumblot(ebook): + def selector(doc): + return doc.select_one('div.section__buttons a[href$="download"]') + return harvest_one_generic(ebook, selector) + + +def harvest_cornellopen(ebook): + def selector(doc): + return doc.select('div.sp-product__buy-btn-container li a[href]') + return harvest_multiple_generic(ebook, selector) + + +def harvest_editorialbonaventuriana(ebook): + def selector(doc): + return doc.select_one('div.djc_fulltext p a[href$=".pdf"]') + return 
harvest_one_generic(ebook, selector) + + +def harvest_esv(ebook): + doc = get_soup(ebook.url.replace('details', 'download')) + if doc: + obj = doc.select_one('div.content-box a[href$=".pdf"]') + if obj: + return make_dl_ebook(obj['href'], ebook) + else: + logger.warning('couldn\'t get link for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_fulcrum(ebook): + def selector(doc): + return doc.select('ul.monograph-catalog-rep-downloads a[href]') + return harvest_multiple_generic(ebook, selector) + + +def harvest_ubiquity(ebook): + def selector(doc): + return doc.find_all('a', attrs={'data-category': re.compile('(epub|pdf) download')}) + return harvest_multiple_generic(ebook, selector) + + +def harvest_orkana(ebook): + def selector(doc): + for obj in doc.find_all('p', string=re.compile(r'\((PDF|E-BOK)\)')): + div = obj.find_parent('div') + if div and div.find_next_sibling('div') and div.find_next_sibling('div').find('a'): + yield div.find_next_sibling('div').find('a') + return harvest_multiple_generic(ebook, selector) + + +def harvest_euna(ebook): + if '/view/' in ebook.url: + return make_dl_ebook(ebook.url.replace('view', 'download'), ebook) + set_bookshop(ebook) + return None, 0 + + +def harvest_orl(ebook): + if ebook.url.startswith('https://openresearchlibrary.org/viewer/'): + orl_id = ebook.url[39:] + return make_dl_ebook( + f'https://openresearchlibrary.org/ext/api/media/{orl_id}/assets/external_content.pdf', + ebook) + return None, 0 + + +def harvest_pressesagro(ebook): + def selector(doc): + return doc.select_one('#sidebar ul li span a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_buponline(ebook): + def selector(doc): + return doc.find('a', string=DOWNLOAD) + return harvest_one_generic(ebook, selector) + + +INTECH = re.compile(r'\.intechopen\.com/books/(\d+)$') +def harvest_intech(ebook): + booknum = INTECH.search(ebook.url) + if booknum: + url = 
(f'https://mts.intechopen.com/storage/books/{booknum.group(1)}/authors_book/authors_book.pdf') + return make_dl_ebook(url, ebook) + return None, 0 + + +def harvest_usmcu(ebook): + def selector(doc): + return doc.find('a', string='PDF download') + return harvest_one_generic(ebook, selector) + + +def harvest_upv(ebook): + def selector(doc): + return doc.select_one('a.descargar[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_una_editions(ebook): + doc = get_soup(ebook.url) + if doc: + obj = doc.find('a', class_='jet-listing-dynamic-link__link', href=True, string='PDF') + if obj: + return make_dl_ebook(obj['href'], ebook) + else: + logger.warning('couldn\'t get link for %s', ebook.url) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_cambridge(ebook): + ebook, status = redirect_ebook(ebook) + doc = get_soup(ebook.url) + if doc: + obj = doc.find('a', string=re.compile('Full book PDF')) + if obj and obj['href']: + dl_url = urljoin(ebook.url, obj['href']) + return make_dl_ebook(dl_url, ebook) + obj = doc.find('meta', attrs={"name": re.compile("citation_pdf_url")}) + if obj and obj['content']: + dl_url = obj['content'] + return make_dl_ebook(dl_url, ebook) + pdflinks = [] + for obj in doc.select('a[data-pdf-content-id]'): + if obj and obj['href']: + chap = urljoin(ebook.url, obj['href']) + pdflinks.append(chap) + stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook) + if stapled: + return stapled + else: + logger.warning('couldn\'t staple %s', pdflinks) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_exon(ebook): + doc = get_soup(ebook.url) + if doc: + pdflinks = [] + for obj in doc.select('a.galley-link.pdf[href]'): + if obj and obj['href']: + chap = obj['href'].replace('/view/', '/download/') + pdflinks.append(chap) + stapled = None + if pdflinks: + stapled = make_stapled_ebook(pdflinks, ebook) + if stapled: + return stapled 
+ else: + logger.warning('couldn\'t staple %s', pdflinks) + else: + logger.warning('couldn\'t get soup for %s', ebook.url) + return None, 0 + + +def harvest_una(ebook): + def selector(doc): + return doc.select_one('#header-primary-action a[href]') + return harvest_one_generic(ebook, selector) + + +def harvest_wbg(ebook): + ''' most of these are archived under files.wbg-wissenverbindet.de ''' + doc = get_soup(ebook.url) + if doc: + sku_obj = doc.select_one('span[itemprop=sku]') + sku = sku_obj.text.strip() if sku_obj else None + if sku: + url = f'https://files.wbg-wissenverbindet.de/Files/Article/ARTK_ZOA_{sku}_0001.pdf' + return make_dl_ebook(url, ebook) + return None, 0 + + +def harvest_kb(ebook): + def selector(doc): + return doc.select_one('a[title=fulltext][href]') + return harvest_one_generic(ebook, selector) + + +def harvest_istanbul(ebook): + def cdn_url(soup): + objs = soup.find_all('a', href=re.compile(r'cdn\.istanbul')) + for obj in objs: + yield obj['href'] + def pdf_urls(ebook): + doc = get_soup(ebook.url, user_agent=settings.GOOGLEBOT_UA, follow_redirects=True) + if doc: + for content_url in cdn_url(doc): + yield content_url + for obj in doc.select('div.post-content h5 a.from-journal[href]'): + chap_url = urljoin(ebook.url, obj['href']) + chap_doc = get_soup(chap_url, user_agent=settings.GOOGLEBOT_UA, follow_redirects=True) + if chap_doc: + for content_url in cdn_url(chap_doc): + yield content_url + + # staple the chapters + stapled = make_stapled_ebook(pdf_urls(ebook), ebook, user_agent=settings.GOOGLEBOT_UA) + if stapled: + return stapled + else: + logger.warning('couldn\'t make ebook file for %s', ebook.url) + return None, 0 + + +def harvest_gta(ebook): + # https://verlag.gta.arch.ethz.ch/en/gta:book_978-3-85676-393-0 + pos = ebook.url.find('_') + if pos < 1: + return None, 0 + isbn = ebook.url[pos + 1:] + api_host = 'https://api.verlag.gta.arch.ethz.ch' + json_url = f'{api_host}/api/v1/graphs/gta/data/gtaapi:PublicRetrieveBook/gta:book_{isbn}/' + r 
= requests.get(json_url) + if r.status_code == 200: + try: + file_url = None + graph = r.json()['@graph'] + for obj in graph: + if "gtaapi:file_url" in obj: + file_url = obj["gtaapi:file_url"] + break + if file_url: + return make_dl_ebook(file_url, ebook) + except IndexError: + logger.error('no item_file for %s', ebook.url) + return None, 0 + + +def harvest_manu(ebook): + def chap_selector(doc): + return doc.select('div.content-box-body div.book-toc a.c-Button--link[href*="/display/"]') + def dl(url): + return url.replace('/display/', '/downloadpdf/').replace('.xml', '.pdf') + doc = get_soup(ebook.url, follow_redirects=True, user_agent=settings.CHROME_UA) + if doc: + obj = doc.find('a', string=re.compile(r"Open Access")) + if not obj or 'href' not in obj.attrs: + return None, 0 + ebook.url = urljoin(ebook.url, obj['href']) + return harvest_stapled_generic(ebook, lambda x: None, chap_selector, + user_agent=settings.CHROME_UA, dl=dl) + return None, 0 + + +def harvest_sciendo(ebook): + def selector(doc): + json_obj = doc.find('script', id='__NEXT_DATA__') + if json_obj: + try: + json_data = json.loads(json_obj.string) + pdf_url = json_data['props']['pageProps']['product']['pdfLink'] + epub_url = json_data['props']['pageProps']['product']['epubLink'] + if pdf_url or epub_url: + if pdf_url: + yield {'href': pdf_url} + if epub_url: + yield {'href': epub_url} + except json.JSONDecodeError as je: + logger.error(f'Bad json {je.msg}') + except KeyError as ke: + logger.error('No links in json for {ebook.url}') + return harvest_multiple_generic(ebook, selector) + +# 2step +def harvest_liege(ebook): + def selector(doc): + urls = [] + pages = doc.find_all('a', href=re.compile(r'/(front|back)-matter/')) + for page in pages: + page_doc = get_soup(page['href'], follow_redirects=True, user_agent=settings.USER_AGENT) + if page_doc: + links = page_doc.find_all('a', href=re.compile(r'orbi\.uliege\.be/(bitstream|handle)/')) + for link in links: + if link['href'] not in urls: + 
urls.append(link['href']) + pdf = epub = repo = None + for content_url in urls: + if content_url.lower().endswith('.pdf'): + pdf = pdf or content_url + elif content_url.lower().endswith('.epub'): + epub = epub or content_url + else: + repo = repo or content_url + if pdf and epub: + break + if pdf: + yield {'href': pdf} + if epub: + yield {'href': epub} + if repo and not (pdf or epub): + repo_doc = get_soup(repo, follow_redirects=True, user_agent=settings.USER_AGENT) + if repo_doc: + return repo_doc.find_all(href=DSPACEPDF) + + return harvest_multiple_generic(ebook, selector) + +# 2step +def harvest_benjamins(ebook): + def selector(doc): + urls = [] + page = doc.find('a', href=re.compile(r'jbe-platform.com')) + if page: + base = page['href'] + base_doc = get_soup(base, follow_redirects=True) + if base_doc: + links = base_doc.select('.access-options a[href]') + for link in links: + dl_url = urljoin(base, link['href']) + yield {'href': dl_url} + return harvest_multiple_generic(ebook, selector) + +def harvest_citation_meta_generic(ebook): + def selector(doc): + citation_pdf_url = get_meta(doc, "citation_pdf_url") + citation_epub_url = get_meta(doc, "citation_epub_url") + if citation_pdf_url or citation_epub_url: + if citation_pdf_url: + yield {'href': citation_pdf_url} + if citation_epub_url: + yield {'href': citation_epub_url} + return harvest_multiple_generic(ebook, selector) diff --git a/core/loaders/ku.py b/core/loaders/ku.py new file mode 100644 index 000000000..19a36dfb1 --- /dev/null +++ b/core/loaders/ku.py @@ -0,0 +1,171 @@ +import requests +from bs4 import BeautifulSoup +from django.conf import settings + +from regluit.core.validation import ( + authlist_cleaner, + identifier_cleaner, + valid_subject, + validate_date, +) +from regluit.core.bookloader import add_from_bookdatas +from regluit.core.models import EbookFile +from regluit.core.parameters import DOWNLOADABLE + +from .multiscrape import BaseMultiScraper, multiscrape +from .utils import ids_from_urls + 
+class KUMultiScraper(BaseMultiScraper): + parser_name = 'xml' + can_scrape_hosts = ['app.knowledgeunlatched.org'] + + @classmethod + def divider(cls, doc): + return doc.find_all('Submission') + + @classmethod + def get_response(cls, url): + return cls.login().get(url) + + @classmethod + def login(cls): + s = requests.Session() + credentials = {'username': settings.KU_EMAIL, 'password': settings.KU_PASSWORD} + r = s.get('https://app.knowledgeunlatched.org/login') + auth_url = BeautifulSoup(r.content, "lxml").find(id='kc-form-login')['action'] + r = s.post(auth_url, data=credentials) + return s + + def get_license(self): + val = self.fetch_one_el_content('LicenseURL') + if val: + self.set('rights_url', val) + + def get_title(self): + val = self.fetch_one_el_content('Title') + if val: + self.set('title', val) + + def get_description(self): + val = self.fetch_one_el_content('Description') + coll = self.doc.select_one('Funder ProgramName') + coll = u"
    This book is made open access as part of the Knowledge Unlatched {}".format(coll.text) if coll else '' + if val: + self.set('description', val + coll) + + def get_genre(self): + val = self.fetch_one_el_content('Type') + if val: + self.set('genre', val) + + def get_language(self): + val = self.fetch_one_el_content('Language') + if val: + self.set('language', val) + + def get_keywords(self): + subjects = [self.fetch_one_el_content('PrimarySubject')] + for subject in self.doc.find_all('ManualSubject'): + subjects.append(subject.text) + bisac = self.fetch_one_el_content('BISAC') + if bisac: + subjects.append((u'bisacsh', bisac)) + subjects.append('KUnlatched') + self.set('subjects', subjects) + + def get_publisher(self): + val = self.fetch_one_el_content('PublisherName') + if val: + self.set('publisher', val) + + def get_cover(self): + image_url = self.fetch_one_el_content('Cover') + if image_url: + self.set('covers', [{'image_url': image_url}]) + + def get_pubdate(self): + value = self.fetch_one_el_content('PublicationDate') + if value: + value = validate_date(value) + if value: + self.set('publication_date', value) + + def get_authors(self): + def fullname(auth): + firstname = auth.FirstName.text + lastname = auth.LastName.text + return u'{} {}'.format(firstname, lastname) + authors = self.doc.find_all('Author') + creator_list = [] + role = 'author' + for author in authors: + creator_list.append({'agent_name': fullname(author)}) + role = author.Role.text + self.set('creator', {'{}s'.format(role): creator_list }) + + def get_downloads(self): + fts = DOWNLOADABLE + dls = self.doc.find_all('Document') + for dl in dls: + dlft = dl.Type.text + url = dl.Path.text + for ft in fts: + if ft in dlft: + dlft = ft + break + if url: + self.set('download_url_{}'.format(dlft), url) + + def get_isbns(self): + isbn_cleaner = identifier_cleaner('isbn', quiet=True) + isbns = {} + isbn = isbn_cleaner(self.fetch_one_el_content('IsbnHardback')) + if isbn: + isbns['isbn_hard'] = isbn + 
isbn = isbn_cleaner(self.fetch_one_el_content('IsbnPaperback')) + if isbn: + isbns['isbn_paper'] = isbn + isbn = isbn_cleaner(self.fetch_one_el_content('IsbnEpdf')) + if isbn: + isbns['isbn_pdf'] = isbn + isbn = isbn_cleaner(self.fetch_one_el_content('IsbnEpub')) + if isbn: + isbns['isbn_epub'] = isbn + return isbns + + def get_identifiers(self): + doi_cleaner = identifier_cleaner('doi', quiet=True) + super(KUMultiScraper, self).get_identifiers() + url = self.fetch_one_el_content('Doi') + if url: + doi = doi_cleaner(url) + if doi: + self.identifiers['doi'] = doi + url = self.fetch_one_el_content('OAPENURL') + if url: + oapn = ids_from_urls(url).get('oapn', None) + if oapn: + self.identifiers['oapn'] = oapn + +ku_rounds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, + 23, 26, 27, 29, 31, 33, 42, 49, 50, 51, 52] + +def load_ku(ku_round=None): + rounds = [ku_round] if ku_round else ku_rounds + editions = [] + for around in rounds: + ku_url = 'https://app.knowledgeunlatched.org/api/rounds/{}/submissions.xml'.format(around) + scrapers = multiscrape(ku_url, scraper_class=KUMultiScraper) + editions.extend(add_from_bookdatas(scrapers)) + return editions + +def activate_ku_ebooks(): + to_activate = EbookFile.objects.filter( + source__startswith='https://app.knowledgeunlatched.org/uploads/', + ebook__active=False, + ) + num_to_activate = to_activate.count() + for ebf in to_activate: + ebf.ebook.activate() + return num_to_activate + diff --git a/core/loaders/multiscrape.py b/core/loaders/multiscrape.py new file mode 100644 index 000000000..40ce5ea2d --- /dev/null +++ b/core/loaders/multiscrape.py @@ -0,0 +1,102 @@ +import logging +import re +from urllib.parse import urljoin + +from bs4 import BeautifulSoup +import requests + +from django.conf import settings + +from regluit.core.bookloader import add_from_bookdatas +from regluit.core.loaders.scrape import BaseScraper +from regluit.core.validation import identifier_cleaner + +logger = logging.getLogger(__name__) +''' +use 
for web pages with multiple books +returns an iterator of scrapers +''' + +class BaseMultiScraper(BaseScraper): + parser_name = 'lxml' + def __init__(self, url, doc): + self.metadata = {} + self.identifiers = {} + self.doc = doc + self.base = url + self.get_all() + if not self.metadata.get('title', None): + self.set('title', '!!! missing title !!!') + self.metadata['identifiers'] = self.identifiers + + @classmethod + def login(cls): + return requests + +def multiscrape(url, scraper_class=BaseMultiScraper): + try: + response = scraper_class.get_response(url) + if response.status_code == 200: + doc = BeautifulSoup(response.content, scraper_class.parser_name) + sections = scraper_class.divider(doc) + for section in sections: + yield scraper_class(url, section) + except requests.exceptions.RequestException as e: + logger.error(e) + self.metadata = None + + +# following is code specific to edp-open.org; refactor when we add another + + +ISBNMATCH = re.compile(r'([\d\-]+)') +class EDPMultiScraper(BaseMultiScraper): + @classmethod + def divider(cls, doc): + return doc.select('article.Bk') + + def get_isbns(self): + '''return a dict of edition keys and ISBNs''' + isbns = {} + isbn_cleaner = identifier_cleaner('isbn', quiet=True) + labels = ['epub', 'pdf', 'paper'] + info = self.doc.select_one('p.nfo').text + isbntexts = re.split('ISBN', info) + for isbntext in isbntexts[1:]: + isbnmatch = ISBNMATCH.search(isbntext) + if isbnmatch: + isbn = isbn_cleaner(isbnmatch.group(0)) + isbns[labels.pop()] = isbn + return isbns + + def get_downloads(self): + dl = self.doc.select_one('nav.dl') + links = dl.select('a.fulldl') + for link in links: + href = urljoin(self.base, link['href']) + if href.endswith('.pdf'): + self.set('download_url_pdf', href) + elif href.endswith('.epub'): + self.set('download_url_epub', href) + + def get_language(self): + if 'english' in self.base: + self.set('language', 'en') + else: + self.set('language', 'fr') + + def get_title(self): + value = 
self.doc.select_one('h2').text + book_id = self.doc.select_one('h2')['id'] + self.identifiers['http'] = u'{}#{}'.format(self.base, book_id) + self.set('title', value) + +def edp_scrape(): + edp_urls = [ + 'https://www.edp-open.org/books-in-french', + 'https://www.edp-open.org/books-in-english', + ] + for url in edp_urls: + scrapers = multiscrape(url, scraper_class=EDPMultiScraper) + add_from_bookdatas(scrapers) + diff --git a/core/loaders/pressbooks.py b/core/loaders/pressbooks.py index 47291e896..0e84b7a88 100644 --- a/core/loaders/pressbooks.py +++ b/core/loaders/pressbooks.py @@ -1,18 +1,26 @@ +import re from regluit.core.validation import identifier_cleaner from . import BaseScraper class PressbooksScraper(BaseScraper): - can_scrape_hosts = ['bookkernel.com', 'milnepublishing.geneseo.edu', - 'press.rebus.community', 'pb.unizin.org'] + can_scrape_hosts = [ + 'bookkernel.com', 'milnepublishing.geneseo.edu', 'press.rebus.community', 'pb.unizin.org', + 'opentext.wsu.edu', 'oer.missouriwestern.edu', 'eskript.ethz.ch', 'opentext.lib.vt.edu', + 'opentextbc.ca', + ] can_scrape_strings = ['pressbooks'] def get_downloads(self): - for dl_type in ['epub', 'mobi', 'pdf']: + for dl_type in ['epub', 'pdf']: download_el = self.doc.select_one('.{}'.format(dl_type)) + value = None if download_el and download_el.find_parent(): value = download_el.find_parent().get('href') - if value: - self.set('download_url_{}'.format(dl_type), value) + else: + a = self.doc.find('a', href=re.compile(r'{}$'.format(dl_type))) + value = a.get('href') if a else None + if value: + self.set('download_url_{}'.format(dl_type), value) def get_publisher(self): value = self.get_dt_dd('Publisher') @@ -22,8 +30,10 @@ def get_publisher(self): if value: self.set('publisher', value) else: - super(PressbooksScraper, self).get_publisher() - + value = self.check_metas(['citation_publisher', 'publisher', r'DC\.Source']) + if value: + self.set('publisher', value) + def get_title(self): value = 
self.doc.select_one('.entry-title a[title]') value = value['title'] if value else None diff --git a/core/loaders/routledge.py b/core/loaders/routledge.py new file mode 100644 index 000000000..808c7a652 --- /dev/null +++ b/core/loaders/routledge.py @@ -0,0 +1,114 @@ +from __future__ import print_function +import re +import requests +from bs4 import BeautifulSoup +from django.conf import settings + +from regluit.core.bookloader import add_from_bookdatas + +from .scrape import BaseScraper + +isbnmatch = re.compile(r'\d{13}') +readbook = re.compile('Read Book') + +class RoutledgeScraper(BaseScraper): + can_scrape_hosts = ['www.routledge.com'] + + def get_keywords(self): + subjects = [] + for sub in self.doc.select('dl.dl-codes dt'): + subjects.append(('bisacsh', sub.string)) + self.set('subjects', subjects) + + def get_author_list(self): + value_list = [] + for auth in self.doc.select('h4.media-author a'): + value_list.append(auth.string) + return value_list + + def get_role(self): + return 'editor' if self.doc.find(string="Edited by ") else 'author' + + def get_isbns(self): + '''return a dict of edition keys and ISBNs''' + def get_isbn(url): + match = isbnmatch.search(url) + if match: + return match.group(0) + + def get_eisbn(eurl): + response = requests.get(eurl, allow_redirects=False) + if response.status_code in (301, 302): + eurl = response.headers['Location'] + return get_isbn(eurl) + + isbns = super(RoutledgeScraper, self).get_isbns() + readbookstr = self.doc.find(string=readbook) + if readbookstr: + eurl = readbookstr.find_parent()['href'] + eisbn = get_eisbn(eurl) + if eisbn: + isbns['ebook'] = eisbn + return isbns + + def get_description(self): + value = self.get_itemprop('description', list_mode='one_item') + if not value: + value = self.check_metas([ + r'dc\.description', + 'og:description', + 'description' + ]) + self.set('description', value) + + def get_publisher(self): + self.set('publisher', "Routledge") + + def get_title(self): + value = 
self.check_metas([r'dc\.title', 'citation_title', 'og:title', 'title']) + if not value: + value = self.fetch_one_el_content('title') + to_delete = ["(Open Access)", "(Hardback)", "- Routledge"] + for text in to_delete: + value = value.replace(text, "") + self.set('title', value) + + +def load_routledge(): + search_url = "https://www.routledge.com/collections/11526" + + def get_collections(url): + try: + response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + if response.status_code == 200: + doc = BeautifulSoup(response.content, 'lxml') + for link in doc.find_all('a', href=re.compile('collections/11526/')): + yield (link.text, "https://www.routledge.com/" + link['href']) + except requests.exceptions.ConnectionError: + print('couldn\'t connect to %s' % search_url) + + def get_coll_books(url): + try: + response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + if response.status_code == 200: + doc = BeautifulSoup(response.content, 'lxml') + for link in doc.select('.media-title a'): + yield link['href'] + except requests.exceptions.ConnectionError: + print('couldn\'t connect to %s' % url) + + books = {} + for (subject, coll_url) in get_collections(search_url): + print(subject) + for book_url in get_coll_books(coll_url): + if not book_url in books: + print(book_url) + new_book = RoutledgeScraper(book_url) + new_book.metadata['subjects'].append(subject) + books[book_url] = new_book + else: + books[book_url].metadata['subjects'].append(subject) + print("Harvesting %s books" % len(list[books.values()])) + add_from_bookdatas(books.values()) + return books + \ No newline at end of file diff --git a/core/loaders/scrape.py b/core/loaders/scrape.py index 04a40e708..863d92714 100644 --- a/core/loaders/scrape.py +++ b/core/loaders/scrape.py @@ -1,14 +1,18 @@ import re import logging -from urlparse import urlparse +from urllib.parse import urlparse, urljoin import requests from bs4 import BeautifulSoup #from gitenberg.metadata.pandata import 
Pandata from django.conf import settings -from urlparse import urljoin from regluit.core import models -from regluit.core.validation import authlist_cleaner, identifier_cleaner, validate_date +from regluit.core.validation import ( + authlist_cleaner, + identifier_cleaner, + valid_subject, + validate_date, +) logger = logging.getLogger(__name__) @@ -22,6 +26,8 @@ class BaseScraper(object): ''' can_scrape_hosts = False can_scrape_strings = False + parser_name = 'lxml' + @classmethod def can_scrape(cls, url): ''' return True if the class can scrape the URL ''' @@ -39,46 +45,47 @@ def can_scrape(cls, url): return True return False - def __init__(self, url): - self.metadata = {} + @classmethod + def get_response(cls, url): + try: + return requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + except requests.exceptions.RequestException as e: + logger.error(e) + + def __init__(self, url, initial={}): + self.metadata = initial self.identifiers = {'http': url} self.doc = None self.base = url - try: - response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + response = type(self).get_response(url) + if response: if response.status_code == 200: self.base = response.url - self.doc = BeautifulSoup(response.content, 'lxml') + self.doc = BeautifulSoup(response.content, self.parser_name) for review in self.doc.find_all(itemtype="http://schema.org/Review"): review.clear() - self.setup() - self.get_genre() - self.get_title() - self.get_language() - self.get_description() - self.get_identifiers() - self.get_keywords() - self.get_publisher() - self.get_pubdate() - self.get_authors() - self.get_cover() - self.get_downloads() - self.get_license() - if not self.metadata.get('title', None): - self.set('title', '!!! 
missing title !!!') - if not self.metadata.get('language', None): - self.set('language', 'en') - except requests.exceptions.RequestException as e: - logger.error(e) - self.metadata = {} - self.metadata['identifiers'] = self.identifiers + self.get_all() + if not self.metadata.get('title', None): + self.set('title', '!!! missing title !!!') + if not self.metadata.get('language', None): + self.set('language', 'en') + self.metadata['identifiers'] = self.identifiers + else: + self.metadata = None + else: + self.metadata = None + # # utilities # def set(self, name, value): - self.metadata[name] = value + if isinstance(value, str): + value= value.strip() + if value or name not in self.metadata: + self.metadata[name] = value + def fetch_one_el_content(self, el_name): data_el = self.doc.find(el_name) @@ -122,7 +129,7 @@ def get_dt_dd(self, name): ''' get the content of
    after a
    containing name''' dt = self.doc.find('dt', string=re.compile(name)) dd = dt.find_next_sibling('dd') if dt else None - return dd.text if dd else None + return dd.text.strip() if dd and dd.text else None def get_itemprop(self, name, **attrs): value_list = [] @@ -137,13 +144,33 @@ def get_itemprop(self, name, **attrs): else: if el.text: value_list.append(el.text) - elif el.has_key('content'): + elif 'content' in el: value_list.append(el['content']) return value_list + + def get_all(self): + self.setup() + self.get_genre() + self.get_title() + self.get_language() + self.get_description() + self.get_identifiers() + self.get_keywords() + self.get_publisher() + self.get_pubdate() + self.get_authors() + self.get_cover() + self.get_downloads() + self.get_license() + self.cleanup() def setup(self): # use this method to get auxiliary resources based on doc pass + + def cleanup(self): + # use this method to process collected data + pass # # getters # @@ -175,8 +202,7 @@ def get_isbns(self): '''return a dict of edition keys and ISBNs''' isbns = {} isbn_cleaner = identifier_cleaner('isbn', quiet=True) - label_map = {'epub': 'EPUB', 'mobi': 'Mobi', - 'paper': 'Paperback', 'pdf':'PDF', 'hard':'Hardback'} + label_map = {'epub': 'EPUB', 'paper': 'Paperback', 'pdf':'PDF', 'hard':'Hardback'} for key in label_map.keys(): isbn_key = 'isbn_{}'.format(key) value = self.check_metas(['citation_isbn'], type=label_map[key]) @@ -195,7 +221,7 @@ def get_isbns(self): def get_identifiers(self): value = self.check_metas([r'DC\.Identifier\.URI']) if not value: - value = self.doc.select_one('link[rel=canonical]') + value = self.doc.select_one('link[rel=canonical][href]') value = value['href'] if value else None value = identifier_cleaner('http', quiet=True)(value) if value: @@ -241,7 +267,11 @@ def get_identifiers(self): def get_keywords(self): value = self.check_metas(['keywords']).strip(',;') if value: - self.set('subjects', re.split(' *[;,] *', value)) + subjects = [] + for subject in 
re.split(' *[;,] *', value): + if valid_subject(subject): + subjects.append(subject) + self.set('subjects', subjects) def get_publisher(self): value = self.check_metas(['citation_publisher', r'DC\.Source']) @@ -252,8 +282,8 @@ def get_pubdate(self): value = self.get_itemprop('datePublished', list_mode='one_item') if not value: value = self.check_metas([ - 'citation_publication_date', r'DC\.Date\.issued', 'datePublished', - 'books:release_date', 'book:release_date' + 'citation_publication_date', 'copyrightYear', r'DC\.Date\.issued', 'datePublished', + 'books:release_date', 'book:release_date', ]) if value: value = validate_date(value) @@ -304,7 +334,7 @@ def get_cover(self): self.set('covers', [{'image_url': image_url}]) def get_downloads(self): - for dl_type in ['epub', 'mobi', 'pdf']: + for dl_type in ['epub', 'pdf']: dl_meta = 'citation_{}_url'.format(dl_type) value = self.check_metas([dl_meta]) if value: diff --git a/core/loaders/smashwords.py b/core/loaders/smashwords.py index 3c4413ddb..452926126 100644 --- a/core/loaders/smashwords.py +++ b/core/loaders/smashwords.py @@ -1,6 +1,7 @@ import re -from urlparse import urljoin +from urllib.parse import urljoin from regluit.core.loaders.scrape import BaseScraper +from regluit.core.parameters import DOWNLOADABLE SWCAT = re.compile(r'^https://www\.smashwords\.com/books/category.*') class SmashwordsScraper(BaseScraper): @@ -22,7 +23,7 @@ def get_description(self): def get_downloads(self): dldiv = self.doc.select_one('#download') if dldiv: - for dl_type in ['epub', 'mobi', 'pdf']: + for dl_type in DOWNLOADABLE: dl_link = dldiv.find('a', href=re.compile(r'.*\.{}'.format(dl_type))) if dl_link: url = urljoin(self.base,dl_link['href']) diff --git a/core/loaders/soup.py b/core/loaders/soup.py new file mode 100644 index 000000000..6b59cb13b --- /dev/null +++ b/core/loaders/soup.py @@ -0,0 +1,33 @@ +import logging + +from bs4 import BeautifulSoup +import requests + +from django.conf import settings + +logger = 
logging.getLogger(__name__) + +def get_soup(url, user_agent=settings.USER_AGENT, follow_redirects=False, verify=True): + try: + response = requests.get(url, headers={"User-Agent": user_agent}, + allow_redirects=follow_redirects, verify=verify) + except requests.exceptions.MissingSchema: + response = requests.get('http://%s' % url, headers={"User-Agent": user_agent}) + except requests.exceptions.ConnectionError as e: + logger.error("Connection refused for %s", url) + logger.error(e) + return None + if response.status_code == 200: + soup = BeautifulSoup(response.content, 'lxml') + + # make sure document has a base + if not soup.find('base'): + obj = soup.find('head') + if obj: + obj.append(soup.new_tag("base", href=response.url)) + else: + logger.error('No head for %s', url) + return soup + else: + logger.error('%s returned code %s', url, response.status_code) + return None diff --git a/core/loaders/springer.py b/core/loaders/springer.py index c30e80362..cf6dc8c5c 100644 --- a/core/loaders/springer.py +++ b/core/loaders/springer.py @@ -1,5 +1,6 @@ import re -from urlparse import urljoin +import json +from urllib.parse import urljoin import requests from bs4 import BeautifulSoup @@ -8,6 +9,7 @@ from regluit.core.validation import identifier_cleaner from regluit.core.bookloader import add_from_bookdatas +from regluit.core.parameters import DOWNLOADABLE from .scrape import BaseScraper, CONTAINS_CC @@ -16,9 +18,19 @@ class SpringerScraper(BaseScraper): can_scrape_strings =['10.1007', '10.1057'] + + @classmethod + def get_response(cls, url): + try: + return requests.get(url, headers={"User-Agent": settings.CHROME_UA}) + except requests.exceptions.RequestException as e: + logger.error(e) + def get_downloads(self): - for dl_type in ['epub', 'mobi', 'pdf']: + for dl_type in DOWNLOADABLE: download_el = self.doc.find('a', title=re.compile(dl_type.upper())) + if not download_el: + download_el = self.doc.find('a', attrs={f'data-book-{dl_type}': True}) if download_el: value = 
download_el.get('href') if value: @@ -26,7 +38,9 @@ def get_downloads(self): self.set('download_url_{}'.format(dl_type), value) def get_description(self): - desc = self.doc.select_one('#book-description') + desc = self.doc.find('div', attrs={'data-component': 'data-unique-selling-points'}) + if not desc: + desc = self.doc.select_one('#book-description') if desc: value = '' for div in desc.contents: @@ -35,11 +49,16 @@ def get_description(self): text = text.replace(u'\xa0', u' ') value = u'{}

    {}

    '.format(value, text) self.set('description', value) + else: + super(SpringerScraper, self).get_description() def get_keywords(self): value = [] for kw in self.doc.select('.Keyword'): value.append(kw.text.strip()) + if len(value) == 0: + for kw in self.doc.select('#keywords-content li.c-article-subject-list__subject'): + value.append(kw.text.strip()) if value: if 'Open Access' in value: value.remove('Open Access') @@ -50,8 +69,10 @@ def get_identifiers(self): el = self.doc.select_one('#doi-url') if el: value = identifier_cleaner('doi', quiet=True)(el.text) - if value: - self.identifiers['doi'] = value + else: + value = identifier_cleaner('doi', quiet=True)(self.check_metas(['doi'])) + if value: + self.identifiers['doi'] = value def get_isbns(self): isbns = {} @@ -65,27 +86,52 @@ def get_isbns(self): value = identifier_cleaner('isbn', quiet=True)(el.text) if value: isbns['electronic'] = value + if len(isbns) > 0: + return isbns + data_json = self.doc.find('script', string=re.compile(r'window\.dataLayer =')) + if data_json: + data_json = data_json.text.strip()[18:] + data = json.loads(data_json.strip(';')) + content = data[0].get('content', None) + if content: + content = content.get('book', None) + if content: + value = identifier_cleaner('isbn', quiet=True)(content.get("pisbn", '')) + if value: + isbns['paper'] = value + value = identifier_cleaner('isbn', quiet=True)(content.get("eisbn", '')) + if value: + isbns['electronic'] = value return isbns + def get_title(self): el = self.doc.select_one('#book-title') value = '' if el: value = el.text.strip() - if value: - value = value.replace('\n', ': ', 1) - self.set('title', value) - if not value: + else: + el = self.doc.select_one('.page-title') + if el: + value = el.text.strip() + if value: + value = value.replace('\n', ': ', 1) + self.set('title', value) + else: super(SpringerScraper, self).get_title() def get_role(self): if self.doc.select_one('#editors'): return 'editor' + if self.doc.find('ul', 
atts={'data-list-type':"editors"}): + return 'editor' return 'author' - def get_author_list(self): + def get_author_list(self): for el in self.doc.select('.authors__name'): yield el.text.strip().replace(u'\xa0', u' ') + for el in self.doc.select('.c-article-author-list__item'): + yield el.text.strip(', ').replace(u'\xa0', u' ') def get_license(self): '''only looks for cc licenses''' @@ -100,11 +146,16 @@ def get_license(self): self.set('rights_url', lic_url) def get_pubdate(self): - pubinfo = self.doc.select_one('#copyright-info') + pubinfo = self.doc.find(attrs={"data-test": "electronic_isbn_publication_date"}) + if not pubinfo: + pubinfo = self.doc.find(attrs={"data-test": "softcover_isbn_publication_date"}) if pubinfo: - yearmatch = HAS_YEAR.search(pubinfo.string) - if yearmatch: - self.set('publication_date', yearmatch.group(0)) + for yearstring in pubinfo.stripped_strings: + yearmatch = HAS_YEAR.search(yearstring) + if yearmatch: + self.set('publication_date', yearmatch.group(0)) + return + def get_publisher(self): self.set('publisher', 'Springer') @@ -116,7 +167,7 @@ def springer_open_books(startpage, endpage): for page in range(startpage, endpage + 1): url = search_url.format(page) try: - response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) + response = requests.get(url, headers={"User-Agent": settings.CHROME_UA}) if response.status_code == 200: base = response.url doc = BeautifulSoup(response.content, 'lxml') @@ -124,5 +175,5 @@ def springer_open_books(startpage, endpage): book_url = urljoin(base, link['href']) yield SpringerScraper(book_url) except requests.exceptions.ConnectionError: - print 'couldn\'t connect to %s' % url + print('couldn\'t connect to %s' % url) return add_from_bookdatas(springer_open_books(startpage, endpage)) diff --git a/core/loaders/tests.py b/core/loaders/tests.py index f94e1ad30..83ecd8174 100644 --- a/core/loaders/tests.py +++ b/core/loaders/tests.py @@ -1,7 +1,7 @@ from django.conf import settings from 
django.test import TestCase from regluit.core.models import Ebook, Edition, Work -from .utils import dl_online +from .harvest import dl_online class LoaderTests(TestCase): def setUp(self): @@ -17,12 +17,13 @@ def test_downloads(self): edition = Edition(work=work) edition.save() - dropbox_url = 'https://www.dropbox.com/s/h5jzpb4vknk8n7w/Jakobsson_The_Troll_Inside_You_EBook.pdf?dl=0' + dropbox_url = 'https://www.dropbox.com/s/azaztyvgf6b98bc/stellar-consensus-protocol.pdf?dl=0' dropbox_ebook = Ebook.objects.create(format='online', url=dropbox_url, edition=edition) dropbox_ebf, new_ebf = dl_online(dropbox_ebook) self.assertTrue(dropbox_ebf.ebook.filesize) - jbe_url = 'http://www.jbe-platform.com/content/books/9789027295958' - jbe_ebook = Ebook.objects.create(format='online', url=jbe_url, edition=edition) + jbe_url = 'https://www.jbe-platform.com/content/books/9789027295958' + jbe_ebook = Ebook.objects.create(format='online', url=jbe_url, edition=edition, + provider='jbe-platform.com') jbe_ebf, new_ebf = dl_online(jbe_ebook) self.assertTrue(jbe_ebf.ebook.filesize) diff --git a/core/loaders/ubiquity.py b/core/loaders/ubiquity.py index c346cec44..899920d29 100644 --- a/core/loaders/ubiquity.py +++ b/core/loaders/ubiquity.py @@ -1,14 +1,33 @@ import re -from urlparse import urlparse +from urllib.parse import urlparse, urljoin -from regluit.utils.lang import get_language_code +from regluit.core.parameters import DOWNLOADABLE +from regluit.utils.lang import lang_to_language_code from . 
import BaseScraper HAS_EDS = re.compile(r'\(eds?\.\)') -UBIQUITY_HOSTS = ["ubiquitypress.com", "kriterium.se", "oa.finlit.fi", "humanities-map.net", - "oa.psupress.org", "larcommons.net", "uwestminsterpress.co.uk", "stockholmuniversitypress.se", +UBIQUITY_HOSTS = [ + "humanities-map.net", + "hup.fi", + "iitikship.iiti.ac.in", + "kriterium.se", + "larcommons.net", "luminosoa.org", + "oa.finlit.fi", + "oa.psupress.org", + "press.lse.ac.uk", + "press.sjms.nu", + "publishing.vt.edu", + "publikationer.uka.se", + "stockholmuniversitypress.se", + "ubiquitypress.com", + "universitypress.whiterose.ac.uk", + "utsepress.lib.uts.edu.au", + "uwestminsterpress.co.uk", + "www.cardiffuniversitypress.org", + "www.mwv-open.de", + "www.winchesteruniversitypress.org", ] class UbiquityScraper(BaseScraper): @@ -24,8 +43,15 @@ def get_language(self): langlabel = self.doc.find(string='Language') lang = langlabel.parent.parent.find_next_sibling() if langlabel else '' lang = lang.get_text() if lang else '' - lang = get_language_code(lang) if lang else '' + lang = lang_to_language_code(lang) if lang else '' if lang: self.set('language', lang) else: super(UbiquityScraper, self).get_language() + + def get_downloads(self): + for dl_type in DOWNLOADABLE: + dl_a = self.doc.find('a', attrs={'data-category': '{} download'.format(dl_type)}) + if dl_a and 'href' in dl_a.attrs: + url = urljoin(self.base, dl_a['href'].strip()) + self.set('download_url_{}'.format(dl_type), url) diff --git a/core/loaders/utils.py b/core/loaders/utils.py index f559870d1..41cc94673 100644 --- a/core/loaders/utils.py +++ b/core/loaders/utils.py @@ -3,28 +3,30 @@ import re import time import unicodedata -import urlparse +from urllib.parse import urlparse from bs4 import BeautifulSoup import requests from django.conf import settings -from django.core.files.base import ContentFile + from regluit.api.crosswalks import inv_relator_contrib from regluit.bisac.models import BisacHeading from regluit.core.bookloader import 
add_by_isbn_from_google, merge_works from regluit.core.isbn import ISBN from regluit.core.models import ( - Ebook, EbookFile, Edition, Identifier, path_for_file, Subject, Work, + Ebook, Edition, Identifier, Subject, Work, ) +from .soup import get_soup + logger = logging.getLogger(__name__) def UnicodeDictReader(utf8_data, **kwargs): csv_reader = csv.DictReader(utf8_data, **kwargs) for row in csv_reader: - yield {key: unicode(value, 'utf-8') for key, value in row.iteritems()} + yield {key: str(value, 'utf-8') for key, value in row.iteritems()} def utf8_general_ci_norm(s): """ @@ -42,11 +44,6 @@ def utf8_general_ci_norm(s): s1 = unicodedata.normalize('NFD', s) return ''.join(c for c in s1 if not unicodedata.combining(c)).upper() -def get_soup(url): - response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) - if response.status_code == 200: - return BeautifulSoup(response.content, 'lxml') - return None def get_authors(book): authors = [] @@ -92,16 +89,9 @@ def get_subjects(book): bisac = BisacHeading.objects.get(notation=code) subjects.append(bisac) except BisacHeading.DoesNotExist: - logger.warning("Please add BISAC {}".format(code)) + logger.warning("Please add BISAC %s", code) return subjects -def add_subject(subject_name, work, authority=''): - try: - subject = Subject.objects.get(name=subject_name) - except Subject.DoesNotExist: - subject = Subject.objects.create(name=subject_name, authority=authority) - subject.works.add(work) - def get_title(book): title = book.get('FullTitle', '') #UMICH if title: @@ -132,7 +122,7 @@ def get_cover(book): if cover.status_code < 400: return cover_url else: - logger.warning("bad cover: {} for: {}".format(cover_url, url)) + logger.warning("bad cover: %s for: %s", cover_url, url) def get_isbns(book): isbns = [] @@ -199,7 +189,7 @@ def load_from_books(books): Author3First, Author3Role, AuthorBio, TableOfContents, Excerpt, DescriptionLong, DescriptionBrief, BISACCode1, BISACCode2, BISACCode3, CopyrightYear, 
ePublicationDate, eListPrice, ListPriceCurrencyType, List Price in USD (paper ISBN), eTerritoryRights, - SubjectListMARC, , Book-level DOI, URL, License + SubjectListMARC, , Book-level DOI, URL, License ''' @@ -232,11 +222,19 @@ def load_from_books(books): if url: Identifier.set(type='http', value=url, edition=edition, work=work) + # get language + lang = get_language(book) + lang = lang if lang else 'en' + # make sure each isbn is represented by an Edition # also associate authors, publication date, cover, publisher for isbn in isbns: edition = add_by_isbn_from_google(isbn, work=work) if edition and edition.work != work: + work.language = lang + work.save() + edition.work.language = lang + edition.work.save() work = merge_works(work, edition.work) if not edition: edition = Edition(title=title, work=work) @@ -254,30 +252,28 @@ def load_from_books(books): # possibly replace work.description description = get_description(book) if len(description) > len(work.description): - work.description = description + work.description = description.replace('\r\n', '\n') work.save() # set language - lang = get_language(book) - if lang: - work.language = lang - work.save() + work.language = lang + work.save() # add a bisac subject (and ancestors) to work for bisacsh in get_subjects(book): while bisacsh: - add_subject(bisacsh.full_label, work, authority="bisacsh") + Subject.set_by_name(bisacsh.full_label, work, authority="bisacsh") bisacsh = bisacsh.parent - logging.info(u'loaded work {}'.format(work.title)) + logging.info(u'loaded work %s', work.title) loading_ok = loaded_book_ok(book, work, edition) results.append((book, work, edition)) try: - logger.info(u"{} {} {}\n".format(i, title, loading_ok)) + logger.info(u"%s %s %s\n", i, title, loading_ok) except Exception as e: - logger.info(u"{} {} {}\n".format(i, title, str(e))) + logger.info(u"%s %s %s\n", i, title, str(e)) return results @@ -294,7 +290,7 @@ def loaded_book_ok(book, work, edition): try: url_id = 
Identifier.objects.get(type='http', value=get_url(book)) if url_id is None: - logger.info("url_id problem: work.id {}, url: {}".format(work.id, get_url(book))) + logger.info("url_id problem: work.id %s, url: %s", work.id, get_url(book)) return False except Exception as e: logger.info(str(e)) @@ -302,8 +298,7 @@ def loaded_book_ok(book, work, edition): # isbns for isbn in isbns: - if Identifier.objects.filter(type='isbn', value=isbn).count() <> 1: - # print ("isbn problem: work.id {}, isbn: {}".format(work.id, isbn)) + if Identifier.objects.filter(type='isbn', value=isbn).count() != 1: return False else: try: @@ -312,9 +307,6 @@ def loaded_book_ok(book, work, edition): logger.info(e) return False - # authors - # print set([ed.name for ed in edition_for_isbn.authors.all()]) - if ( set([utf8_general_ci_norm(author[0]) for author in authors]) != set([utf8_general_ci_norm(ed.name) for ed in edition_for_isbn.authors.all()]) @@ -352,142 +344,23 @@ def loaded_book_ok(book, work, edition): ID_URLPATTERNS = { 'goog': re.compile(r'[\./]google\.com/books\?.*id=(?P[a-zA-Z0-9\-_]{12})'), 'olwk': re.compile(r'[\./]openlibrary\.org(?P/works/OL\d{1,8}W)'), - 'doab': re.compile(r'([\./]doabooks\.org/doab\?.*rid:|=oai:doab-books:)(?P\d{1,8})'), + 'doab': re.compile(r'([\./]directory\.doabooks\.org/handle/)(?P20\.500\.12854/\d{5,8})'), 'gdrd': re.compile(r'[\./]goodreads\.com/book/show/(?P\d{1,8})'), 'ltwk': re.compile(r'[\./]librarything\.com/work/(?P\d{1,8})'), 'oclc': re.compile(r'\.worldcat\.org/.*oclc/(?P\d{8,12})'), 'doi': re.compile(r'[\./]doi\.org/(?P10\.\d+/\S+)'), 'gtbg': re.compile(r'[\./]gutenberg\.org/ebooks/(?P\d{1,6})'), 'glue': re.compile(r'[\./]unglue\.it/work/(?P\d{1,7})'), + 'oapn': re.compile(r'[\./]oapen\.org/download\?.*docid=(?P\d{1,8})'), } def ids_from_urls(url): ids = {} - for ident in ID_URLPATTERNS.keys(): - id_match = ID_URLPATTERNS[ident].search(url) + if not url: + return ids + for ident, pattern in ID_URLPATTERNS.items(): + id_match = 
pattern.search(url) if id_match: ids[ident] = id_match.group('id') return ids -DROPBOX_DL = re.compile(r'"(https://dl.dropboxusercontent.com/content_link/[^"]+)"') - -def dl_online(ebook): - if ebook.format != 'online': - pass - elif ebook.url.find(u'dropbox.com/s/') >= 0: - response = requests.get(ebook.url, headers={"User-Agent": settings.USER_AGENT}) - if response.status_code == 200: - match_dl = DROPBOX_DL.search(response.content) - if match_dl: - return make_dl_ebook(match_dl.group(1), ebook) - else: - logger.warning('couldn\'t get {}'.format(ebook.url)) - else: - logger.warning('couldn\'t get dl for {}'.format(ebook.url)) - - elif ebook.url.find(u'jbe-platform.com/content/books/') >= 0: - doc = get_soup(ebook.url) - if doc: - obj = doc.select_one('div.fulltexticoncontainer-PDF a') - if obj: - dl_url = urlparse.urljoin(ebook.url, obj['href']) - return make_dl_ebook(dl_url, ebook) - else: - logger.warning('couldn\'t get dl_url for {}'.format(ebook.url)) - else: - logger.warning('couldn\'t get soup for {}'.format(ebook.url)) - - return None, False - -def make_dl_ebook(url, ebook): - if EbookFile.objects.filter(source=ebook.url): - return EbookFile.objects.filter(source=ebook.url)[0], False - response = requests.get(url, headers={"User-Agent": settings.USER_AGENT}) - if response.status_code == 200: - filesize = int(response.headers.get("Content-Length", 0)) - filesize = filesize if filesize else None - format = type_for_url(url, content_type=response.headers.get('content-type')) - if format != 'online': - new_ebf = EbookFile.objects.create( - edition=ebook.edition, - format=format, - source=ebook.url, - ) - new_ebf.file.save(path_for_file(new_ebf, None), ContentFile(response.content)) - new_ebf.save() - new_ebook = Ebook.objects.create( - edition=ebook.edition, - format=format, - provider='Unglue.it', - url=new_ebf.file.url, - rights=ebook.rights, - filesize=filesize, - version_label=ebook.version_label, - version_iter=ebook.version_iter, - ) - new_ebf.ebook = 
new_ebook - new_ebf.save() - return new_ebf, True - else: - logger.warning('download format for {} is not ebook'.format(url)) - else: - logger.warning('couldn\'t get {}'.format(url)) - return None, False - -def type_for_url(url, content_type=None): - if not url: - return '' - if url.find('books.openedition.org') >= 0: - return 'online' - if Ebook.objects.filter(url=url): - return Ebook.objects.filter(url=url)[0].format - ct = content_type if content_type else contenttyper.calc_type(url) - if re.search("pdf", ct): - return "pdf" - elif re.search("octet-stream", ct) and re.search("pdf", url, flags=re.I): - return "pdf" - elif re.search("octet-stream", ct) and re.search("epub", url, flags=re.I): - return "epub" - elif re.search("text/plain", ct): - return "text" - elif re.search("text/html", ct): - if url.find('oapen.org/view') >= 0: - return "html" - return "online" - elif re.search("epub", ct): - return "epub" - elif re.search("mobi", ct): - return "mobi" - return "other" - -class ContentTyper(object): - """ """ - def __init__(self): - self.last_call = dict() - - def content_type(self, url): - try: - r = requests.head(url) - return r.headers.get('content-type', '') - except: - return '' - - def calc_type(self, url): - delay = 1 - # is there a delay associated with the url - netloc = urlparse.urlparse(url).netloc - - # wait if necessary - last_call = self.last_call.get(netloc) - if last_call is not None: - now = time.time() - min_time_next_call = last_call + delay - if min_time_next_call > now: - time.sleep(min_time_next_call-now) - - self.last_call[netloc] = time.time() - - # compute the content-type - return self.content_type(url) - -contenttyper = ContentTyper() diff --git a/core/lookups.py b/core/lookups.py index 06c50936d..05ce92e22 100644 --- a/core/lookups.py +++ b/core/lookups.py @@ -2,7 +2,9 @@ from selectable.registry import registry from django.contrib.auth.models import User +from django.db import models from django.db.models import Count + from 
regluit.core.models import Work, PublisherName, Edition, Subject, EditionNote, Ebook from regluit.utils.text import sanitize_line @@ -80,6 +82,17 @@ def create_item(self, value): new_note.save() return new_note +class Search(models.Lookup): + lookup_name = 'search' + + def as_mysql(self, compiler, connection): + lhs, lhs_params = self.process_lhs(compiler, connection) + rhs, rhs_params = self.process_rhs(compiler, connection) + params = lhs_params + rhs_params + return 'MATCH (%s) AGAINST (%s IN BOOLEAN MODE)' % (lhs, rhs), params + +models.TextField.register_lookup(Search) + registry.register(OwnerLookup) registry.register(WorkLookup) registry.register(PublisherNameLookup) @@ -87,3 +100,4 @@ def create_item(self, value): registry.register(SubjectLookup) registry.register(EditionNoteLookup) registry.register(EbookLookup) + diff --git a/core/management/commands/add_missing_doab_covers.py b/core/management/commands/add_missing_doab_covers.py deleted file mode 100644 index cd30149cd..000000000 --- a/core/management/commands/add_missing_doab_covers.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import print_function -from django.core.management.base import BaseCommand - -from regluit.core.models import Work -from regluit.core.loaders.doab import update_cover_doab - -class Command(BaseCommand): - help = "make covers for doab editions with bad covers" - - def handle(self, **options): - works = Work.objects.filter(identifiers__type='doab').distinct() - print('checking {} works with doab'.format(works.count())) - num = 0 - for work in works: - if not work.cover_image_thumbnail(): - update_cover_doab(work.doab, work.preferred_edition, store_cover=True) - #print(work.doab) - num += 1 - if num % 10 == 0: - print('{} doab covers updated'.format(num)) - #break - print('Done: {} doab covers updated'.format(num)) \ No newline at end of file diff --git a/core/management/commands/add_openlibrary.py b/core/management/commands/add_openlibrary.py index 99fc68b8a..313c98511 100644 
--- a/core/management/commands/add_openlibrary.py +++ b/core/management/commands/add_openlibrary.py @@ -6,5 +6,5 @@ class Command(BaseCommand): def handle(self, *args, **options): for work in models.Work.objects.filter(openlibrary_lookup__isnull=True): - print "loading openlibrary data for %s" % work + self.stdout.write("loading openlibrary data for %s" % work) bookloader.add_openlibrary(work) diff --git a/core/management/commands/bisac_to_bisacsh.py b/core/management/commands/bisac_to_bisacsh.py new file mode 100644 index 000000000..fd039df02 --- /dev/null +++ b/core/management/commands/bisac_to_bisacsh.py @@ -0,0 +1,26 @@ +import re +from django.core.management.base import BaseCommand +from regluit.bisac.models import BisacHeading +from regluit.core.models import Subject + + +bisac_pattern = re.compile(r'[A-Z]{3}\d+') + +class Command(BaseCommand): + help = "fix bisac headings" + + def handle(self, **options): + for subject in Subject.objects.filter(name__contains='bisac'): + print(subject.name) + match = bisac_pattern.search(subject.name) + bisac_code = match.group(0) if match else None + if bisac_code: + try: + bisac_heading = BisacHeading.objects.get(notation=bisac_code) + for work in subject.works.all(): + while bisac_heading: + Subject.set_by_name(bisac_heading.full_label, work, authority="bisacsh") + bisac_heading = bisac_heading.parent + subject.delete() + except BisacHeading.DoesNotExist: + self.stdout.write("no Bisac heading with notation %s" % bisac_code) diff --git a/core/management/commands/check_payment_integrity.py b/core/management/commands/check_payment_integrity.py deleted file mode 100644 index 706ff4079..000000000 --- a/core/management/commands/check_payment_integrity.py +++ /dev/null @@ -1,27 +0,0 @@ -from django.core.management.base import BaseCommand -from django.db.models import Q, F - -from regluit.payment.parameters import TRANSACTION_STATUS_ACTIVE -from regluit.core import models - -class Command(BaseCommand): - help = "Do some integrity 
checks on our Payments" - - def handle(self, **options): - print "number of Campaigns", models.Campaign.objects.count() - print "number of active Campaigns", models.Campaign.objects.filter(status='ACTIVE').count() - for campaign in models.Campaign.objects.filter(status='ACTIVE'): - print stats_for_active_campaign(campaign) - -def stats_for_active_campaign(campaign): - # might need to calculate 'number of users with more than 1 ACTIVE transaction (should be 0)' - # set([t.user for t in c.transaction_set.filter(status='Active')]) - set(userlists.supporting_users(c.work,1000)) - # everyone with an ACTIVE pledge should have the work on his/her wishlist - # set([w.user for w in c.work.wishlists.all()]) - # set([t.user for t in campaign.transaction_set.filter(status=TRANSACTION_STATUS_ACTIVE)]) - set([w.user for w in c.work.wishlists.all()]) - return {'name': campaign.name, - 'work':campaign.work, - 'number of ACTIVE transactions':campaign.transaction_set.filter(status=TRANSACTION_STATUS_ACTIVE).count(), - 'number of users with ACTIVE transactions': len(set([t.user for t in campaign.transaction_set.filter(status=TRANSACTION_STATUS_ACTIVE)])), - 'total amount of pledges in ACTIVE transactions': sum([t.amount for t in campaign.transaction_set.filter(status=TRANSACTION_STATUS_ACTIVE)]), - } \ No newline at end of file diff --git a/core/management/commands/check_works_integrity.py b/core/management/commands/check_works_integrity.py index 053168741..3ed212eec 100644 --- a/core/management/commands/check_works_integrity.py +++ b/core/management/commands/check_works_integrity.py @@ -7,24 +7,28 @@ class Command(BaseCommand): help = "Do a few integrity checks on Works, Editions, and Identifiers" def handle(self, **options): - print "Number of Works without identifiers: ", models.Work.objects.filter(identifiers__isnull=True).count() - print "Last 20 Works without identifiers: " + self.stdout.write("Number of Works without identifiers: {}".format( + 
models.Work.objects.filter(identifiers__isnull=True).count())) + self.stdout.write("Last 20 Works without identifiers: ") for w in models.Work.objects.filter(identifiers__isnull=True).order_by('-created')[0:20]: - print "id: %d | title: %s | created: %s" % (w.id, w.title, w.created) + self.stdout.write("id: %d | title: %s | created: %s" % (w.id, w.title, w.created)) # models.Work.objects.filter(identifiers__isnull=True).filter(editions__isnull=False)[0].identifiers.all() - print "Number of editions that are currently tied to Works w/o identifiers ", \ - models.Edition.objects.filter(work__identifiers__isnull=True).count() - print "Number of Identifiers not tied to Works (should be 0): ", \ - models.Identifier.objects.filter(work__isnull=True).count() - print "Number of Editions not tied to a Work (should be 0): ", models.Edition.objects.filter(work__isnull=True).count() - print "Number of Ebooks not tied to an Edition (should be 0): ", models.Ebook.objects.filter(edition__isnull=True).count() + self.stdout.write("Number of editions that are currently tied to Works w/o identifiers {}".format( + models.Edition.objects.filter(work__identifiers__isnull=True).count())) + self.stdout.write("Number of Identifiers not tied to Works (should be 0): {}".format( + models.Identifier.objects.filter(work__isnull=True).count())) + self.stdout.write("Number of Editions not tied to a Work (should be 0): {}".format( + models.Edition.objects.filter(work__isnull=True).count())) + self.stdout.write("Number of Ebooks not tied to an Edition (should be 0): {}".format( + models.Ebook.objects.filter(edition__isnull=True).count())) # is the possibility of problems coming from the fact that there are two places to tie # Work and Edition -- 1) foreign key Edition.work = models.ForeignKey("Work", related_name="editions", null=True) # 2) sharing the same Identifier. 
# check both that iff a pair of Work and Edition share an identifier, that Work and Edition have a foreign key relationship - print "Number of Works that have editions->identifiers that don't lead back to the same work (should be 0): ", models.Work.objects.filter(~Q(editions__identifiers__work__id = F('id'))).count() + self.stdout.write("Number of Works that have editions->identifiers that don't lead back to the same work (should be 0): {}".format( + models.Work.objects.filter(~Q(editions__identifiers__work__id = F('id'))).count())) # check that for all Identifier pairs with an Edition that Edition<->Work foreign key relationships ties the same Edition/Work - print "Number of Identifier pairs with an Edition in which Edition<->Work foreign key relationships does not tie the same Edition/Work (should be 0): ", \ - models.Identifier.objects.filter(edition__isnull=False).filter(~Q(edition__work__id = F('work__id'))).count() \ No newline at end of file + self.stdout.write("Number of Identifier pairs with an Edition in which Edition<->Work foreign key relationships does not tie the same Edition/Work (should be 0): {}".format( + models.Identifier.objects.filter(edition__isnull=False).filter(~Q(edition__work__id = F('work__id'))).count())) \ No newline at end of file diff --git a/core/management/commands/claim_by_isbn.py b/core/management/commands/claim_by_isbn.py index 00a7d00c7..3c67a9cad 100644 --- a/core/management/commands/claim_by_isbn.py +++ b/core/management/commands/claim_by_isbn.py @@ -9,13 +9,16 @@ class Command(BaseCommand): help = "claim books for rights_holder based on a text file of ISBNs" - args = " " + def add_arguments(self, parser): + parser.add_argument('rights_holder_id', nargs='+', type=int, help="rights_holder id") + parser.add_argument('filename', nargs='+', help="filename") + def handle(self, rights_holder_id, filename, **options): try: rh = models.RightsHolder.objects.get(id=int(rights_holder_id)) except models.Identifier.DoesNotExist: - print 
'{} not a rights_holder'.format(rights_holder_id) + self.stdout.write('{} not a rights_holder'.format(rights_holder_id)) return with open(filename) as f: for isbn in f: @@ -24,14 +27,14 @@ def handle(self, rights_holder_id, filename, **options): work = models.Identifier.objects.get(type='isbn',value=isbn).work try: c = models.Claim.objects.get(work=work) - print '{} already claimed by {}'.format(work, c.rights_holder) + self.stdout.write('{} already claimed by {}'.format(work, c.rights_holder)) except models.Claim.DoesNotExist: c = models.Claim.objects.create( work=work, rights_holder=rh, user=rh.owner, status='active') - print '{} claimed for {}'.format(work, rh) + self.stdout.write('{} claimed for {}'.format(work, rh)) except models.Identifier.DoesNotExist: - print '{} not loaded'.format(isbn) + self.stdout.write('{} not loaded'.format(isbn)) continue diff --git a/core/management/commands/clean_broken_gitenberg_ebooks.py b/core/management/commands/clean_broken_gitenberg_ebooks.py index b9defdef3..69a12cf82 100644 --- a/core/management/commands/clean_broken_gitenberg_ebooks.py +++ b/core/management/commands/clean_broken_gitenberg_ebooks.py @@ -1,6 +1,6 @@ from __future__ import print_function from itertools import islice -from urlparse import urlparse +from urllib.parse import urlparse import sys import requests @@ -33,10 +33,10 @@ def calc_problem_ebooks(): status_code = requests.head(ebook.url).status_code - if status_code <> 302: + if status_code != 302: non302statuscode_count += 1 - print ("\r", i, ebook.url, status_code, non302statuscode_count, end="") + self.stdout.write("\r", i, ebook.url, status_code, non302statuscode_count, end="") sys.stdout.flush() results.append( @@ -47,7 +47,7 @@ def calc_problem_ebooks(): ) - return [result for result in results if result['status_code'] <> 302] + return [result for result in results if result['status_code'] != 302] class Command(BaseCommand): @@ -56,15 +56,15 @@ class Command(BaseCommand): def handle(self, 
**options): problem_ebooks = calc_problem_ebooks() - print ("number of problem ebooks", len(problem_ebooks)) + self.stdout.write("number of problem ebooks", len(problem_ebooks)) # deactivate problem ebooks for (i, result) in enumerate(problem_ebooks): ebook = Ebook.objects.get(id=result['id']) - print ("\r", "deactivating ", i, ebook.id, end="") + self.stdout.write("\r", "deactivating ", i, ebook.id, end="") ebook.deactivate() # reload repos for (i, repo_name) in enumerate(set([repo_name_from_url(ebook['url']) for ebook in problem_ebooks])): - print ("reloading ", repo_name) + self.stdout.write("reloading ", repo_name) load_from_yaml(yaml_url(repo_name)) diff --git a/core/management/commands/clean_dangling_works.py b/core/management/commands/clean_dangling_works.py new file mode 100644 index 000000000..a7d9a3b4f --- /dev/null +++ b/core/management/commands/clean_dangling_works.py @@ -0,0 +1,22 @@ +from django.core.management.base import BaseCommand +from django.db.models import Count + +from regluit.core.models import Work, WasWork +from regluit.core.bookloader import merge_works + + + +class Command(BaseCommand): + '''remove works and editions without titles''' + help = "remove works and editions without titles" + + def handle(self, **options): + orphans = Work.objects.annotate(num_editions=Count('editions')).filter(num_editions=0) + for work in orphans: + self.stdout.write('cleaning %s' % work.title) + parent = None + for parent in WasWork.objects.filter(was=work.id): + # remerge into parent + merge_works(parent.work, work) + if not parent: + work.delete() diff --git a/core/management/commands/clean_db_strings.py b/core/management/commands/clean_db_strings.py index 2ce39ff60..4fefc6ddc 100644 --- a/core/management/commands/clean_db_strings.py +++ b/core/management/commands/clean_db_strings.py @@ -1,10 +1,8 @@ -from __future__ import print_function - from django.core.management.base import BaseCommand from django.db import IntegrityError from regluit.core import 
models -from regluit.utils.text import sanitize_line, remove_badxml +from regluit.utils.text import sanitize_line, remove_author_junk, remove_badxml class Command(BaseCommand): @@ -18,32 +16,40 @@ def handle(self, **options): work.title = sanitize_line(work.title) work.save() work_titles_fixed +=1 - if work.description and remove_badxml(work.description) != work.description: - work.description = remove_badxml(work.description) - work.save() - work_descriptions_fixed +=1 - print ("work_titles_fixed = {}".format(work_titles_fixed)) - print ("work_descriptions_fixed = {}".format(work_descriptions_fixed)) + if work.description: + save = False + if '\r\n' in work.description: + work.description = work.description.replace('\r\n', '\n') + save = True + if work.description and remove_badxml(work.description) != work.description: + work.description = remove_badxml(work.description) + save = True + if save: + work.save() + work_descriptions_fixed +=1 + self.stdout.write("work_titles_fixed = {}".format(work_titles_fixed)) + self.stdout.write("work_descriptions_fixed = {}".format(work_descriptions_fixed)) for edition in models.Edition.objects.all(): if sanitize_line(edition.title) != edition.title: edition.title = sanitize_line(edition.title) edition.save() edition_titles_fixed +=1 - print ("edition_titles_fixed = {}".format(edition_titles_fixed)) + self.stdout.write("edition_titles_fixed = {}".format(edition_titles_fixed)) for author in models.Author.objects.all(): - if sanitize_line(author.name) != author.name: - author.name = sanitize_line(author.name) - try: - author.save() - except IntegrityError as e: - # duplicate entry - correct = models.Author.objects.get(name=sanitize_line(author.name)) - for relator in author.relator_set.all(): - relator.author = correct - relator.save() - author.delete() - author_names_fixed +=1 - print ("author_names_fixed = {}".format(author_names_fixed)) + if remove_author_junk(sanitize_line(author.name)) != author.name: + author.name = 
remove_author_junk(sanitize_line(author.name)) + if author.name: + try: + author.save() + except IntegrityError as e: + # duplicate entry + correct = models.Author.objects.get(name=sanitize_line(author.name)) + for relator in author.relator_set.all(): + relator.author = correct + relator.save() + author.delete() + author_names_fixed +=1 + self.stdout.write("author_names_fixed = {}".format(author_names_fixed)) for publishername in models.PublisherName.objects.all(): if sanitize_line(publishername.name) != publishername.name: publishername.name = sanitize_line(publishername.name) @@ -60,4 +66,4 @@ def handle(self, **options): publisher.save() publishername.delete() publisher_names_fixed +=1 - print ("publisher_names_fixed = {}".format(publisher_names_fixed)) + self.stdout.write("publisher_names_fixed = {}".format(publisher_names_fixed)) diff --git a/core/management/commands/clean_languages.py b/core/management/commands/clean_languages.py new file mode 100644 index 000000000..c98383832 --- /dev/null +++ b/core/management/commands/clean_languages.py @@ -0,0 +1,20 @@ +from django.core.management.base import BaseCommand + +from regluit.core.models import Work +from regluit.utils.lang import lang_to_language_code, lang_and_locale, iso639 + +iso639 = r'^[a-z][a-z][a-z]?$' +lang_and_locale = r'^[a-z][a-z]\-[A-Z][A-Z]$' + +class Command(BaseCommand): + '''remove works and editions without titles''' + help = "remove works and editions without titles" + + def handle(self, **options): + badworks = Work.objects.exclude(language__regex=iso639) + badworks = badworks.exclude(language__regex=lang_and_locale) + self.stdout.write('{} works to fix'.format(badworks.count())) + for work in badworks: + language = lang_to_language_code(work.language) + work.language = language if language else 'xx' + work.save() diff --git a/core/management/commands/convert_campaign_ebooks_to_mobi.py b/core/management/commands/convert_campaign_ebooks_to_mobi.py deleted file mode 100644 index 
279b51914..000000000 --- a/core/management/commands/convert_campaign_ebooks_to_mobi.py +++ /dev/null @@ -1,24 +0,0 @@ -from itertools import islice -from django.core.management.base import BaseCommand -from regluit.core import (mobigen, tasks) - - -class Command(BaseCommand): - help = "For campaign works without a mobi ebook, generate mobi ebooks where possible." - args = " " - - def handle(self, limit=None, async=True, **options): - - if limit is not None: - limit = int(limit) - - for (i, edition) in enumerate(islice(mobigen.editions_to_convert(), limit)): - - print (i, edition.work.get_absolute_url()) - - if async: - task = tasks.generate_mobi_ebook_for_edition.delay(edition) - print (task.id) - else: - ebook = mobigen.generate_mobi_ebook_for_edition(edition) - print (ebook.id) diff --git a/core/management/commands/dedupe_doab.py b/core/management/commands/dedupe_doab.py new file mode 100644 index 000000000..097db2ad6 --- /dev/null +++ b/core/management/commands/dedupe_doab.py @@ -0,0 +1,28 @@ +from django.core.management.base import BaseCommand +from django.db.models import Count,Subquery, OuterRef, IntegerField + +from regluit.core.loaders.doab import get_doab_record +from regluit.core.models import Work, Identifier + + +class Command(BaseCommand): + help = "remove duplicate doab ids " + + def handle(self, **options): + doab_works = Work.objects.annotate( + doab_count=Subquery( + Identifier.objects.filter( + type='doab', + work=OuterRef('pk') + ).values('work') + .annotate(cnt=Count('pk')) + .values('cnt'), + output_field=IntegerField() + ) + ) + for w in doab_works.filter(doab_count__gt=1): + for ident in w.identifiers.filter(type="doab"): + record = get_doab_record(ident.value) + if not record: + self.stdout.write('removing %s' % ident.value) + ident.delete() diff --git a/core/management/commands/dedupe_ebooks_with_same_urls.py b/core/management/commands/dedupe_ebooks_with_same_urls.py index e2d53ede7..b77764d65 100644 --- 
a/core/management/commands/dedupe_ebooks_with_same_urls.py +++ b/core/management/commands/dedupe_ebooks_with_same_urls.py @@ -7,17 +7,17 @@ def delete_newest_ebooks(ebooks): given a list of ebooks (presumably with the same URL), delete all but the ebook that was created first """ for ebook in sorted(ebooks, key=lambda ebook: ebook.created)[1:]: - print "deleting ebook.id {}, edition.id {} work.id {}".format(ebook.id, + self.stdout.write("deleting ebook.id {}, edition.id {} work.id {}".format(ebook.id, ebook.edition_id, - ebook.edition.work_id) + ebook.edition.work_id)) ebook.delete() intact = ebooks[0] - print "leaving undeleted: ebook.id {}, edition.id {} work.id {}".format( + print("leaving undeleted: ebook.id {}, edition.id {} work.id {}".format( intact.id, intact.edition_id, intact.edition.work_id - ) + )) class Command(BaseCommand): @@ -34,5 +34,5 @@ def handle(self, **options): # look through the URLs locating ones with more than one ebook for (url, ebooks) in ebooks_by_url.items(): if len(ebooks) > 1: - print (url, len(ebooks)) + self.stdout.write(url, len(ebooks)) delete_newest_ebooks(ebooks) \ No newline at end of file diff --git a/core/management/commands/dedupe_onlines.py b/core/management/commands/dedupe_onlines.py new file mode 100644 index 000000000..92747eb81 --- /dev/null +++ b/core/management/commands/dedupe_onlines.py @@ -0,0 +1,27 @@ +from django.core.management.base import BaseCommand + +from django.db.models import Count +from regluit.core.models import Work, Ebook, EbookFile + +class Command(BaseCommand): + help = "remove old online ebooks from same provider" + + def handle(self, **options): + allonlines = Work.objects.filter(editions__ebooks__format='online').distinct() + self.stdout.write('%s works with online ebooks' % allonlines.count()) + removed = 0 + for work in allonlines: + onlines = Ebook.objects.filter( + edition__work__id=work.id, + format='online' + ).order_by('-created') + num_onlines = onlines.count() + if num_onlines >= 2: + 
new_provider = onlines[0].provider + for online in onlines[1:]: + harvested = EbookFile.objects.filter(source=online.url).count() + if not harvested and online.provider == new_provider: + self.stdout.write(online.edition.work.title) + online.delete() + removed += 1 + self.stdout.write('%s online ebooks removed' % removed) \ No newline at end of file diff --git a/core/management/commands/delete_empty_works.py b/core/management/commands/delete_empty_works.py new file mode 100644 index 000000000..4a7cf387c --- /dev/null +++ b/core/management/commands/delete_empty_works.py @@ -0,0 +1,18 @@ +from django.core.management.base import BaseCommand + +from regluit.core.models import Work + + + +class Command(BaseCommand): + '''remove works and editions without titles''' + help = "remove works and editions without titles" + + def handle(self, **options): + badworks = Work.objects.filter(title='') + + for work in badworks: + work.selected_edition = None + for edition in work.editions.all(): + edition.delete() + work.delete() diff --git a/core/management/commands/delete_subjects_with_commas.py b/core/management/commands/delete_subjects_with_commas.py index 078784c44..6e4c8b185 100644 --- a/core/management/commands/delete_subjects_with_commas.py +++ b/core/management/commands/delete_subjects_with_commas.py @@ -14,5 +14,5 @@ def handle(self, **options): for subject in comma_subjects: num_commas = len(subject.name.split(','))-1 if num_commas >2: - print subject.name + self.stdout.write(subject.name) subject.delete() diff --git a/core/management/commands/delete_subjects_with_nonxml.py b/core/management/commands/delete_subjects_with_nonxml.py index e4c698fe0..b3a344f3e 100644 --- a/core/management/commands/delete_subjects_with_nonxml.py +++ b/core/management/commands/delete_subjects_with_nonxml.py @@ -22,7 +22,10 @@ class Command(BaseCommand): def handle(self, **options): bad_subjects = [subject for subject in Subject.objects.all() if clean_string(subject.name) != subject.name] - 
print ("number of bad subjects:", len(bad_subjects)) + print("number of bad subjects: %s" % len(bad_subjects)) for bad_subject in bad_subjects: - print (bad_subject.name.encode('ascii', 'ignore'), bad_subject.works.count()) + self.stdout.write('{}, {}'.format( + bad_subject.name.encode('ascii', 'ignore'), + bad_subject.works.count() + )) bad_subject.delete() diff --git a/core/management/commands/despam_descriptions.py b/core/management/commands/despam_descriptions.py deleted file mode 100644 index d70c0a4e0..000000000 --- a/core/management/commands/despam_descriptions.py +++ /dev/null @@ -1,17 +0,0 @@ -from django.core.management.base import BaseCommand - -from regluit.core import models, bookloader - -class Command(BaseCommand): - help = "check description db for free ebook spam" - - def handle(self, **options): - spam_strings=["1stWorldLibrary.ORG", "GeneralBooksClub.com", "million-books.com", "AkashaPublishing.Com"] - for spam_string in spam_strings: - qs=models.Work.objects.filter(description__icontains=spam_string) - print "Number of Works with %s in description: %s" % (spam_string, qs.count()) - - for work in qs: - work.description = bookloader.despam_description(work.description) - print "updating work %s" % work - bookloader.add_openlibrary(work, hard_refresh = True) diff --git a/core/management/commands/dump_emails.py b/core/management/commands/dump_emails.py deleted file mode 100644 index a8975eb41..000000000 --- a/core/management/commands/dump_emails.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -print user emails -""" - -from django.core.management.base import BaseCommand -from django.contrib.auth.models import User - -from regluit.core import models - -class Command(BaseCommand): - help = "dump all ungluer emails" - - def handle(self, **options): - num=0 - - for user in User.objects.all(): - print user.email - num=num+1 - print "Number of emails= %s" % num diff --git a/core/management/commands/fail_campaign_amazon.py 
b/core/management/commands/fail_campaign_amazon.py deleted file mode 100644 index 8a73d2ace..000000000 --- a/core/management/commands/fail_campaign_amazon.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -print user emails -""" - -from django.core.management.base import BaseCommand - -from regluit.core import models, signals - -class Command(BaseCommand): - help = "set active campaigns to unsuccessful" - - def handle(self, **options): - for campaign in models.Campaign.objects.filter(status='ACTIVE'): - campaign.status = 'UNSUCCESSFUL' - campaign.save() - action = models.CampaignAction(campaign=campaign, type='failed', comment = 'amazon suspension') - action.save() - signals.amazon_suspension.send(sender=None,campaign=campaign) - print 'campaign %s set to UNSUCCESSFUL' % campaign.id diff --git a/core/management/commands/fix_avatars.py b/core/management/commands/fix_avatars.py new file mode 100644 index 000000000..1383168e2 --- /dev/null +++ b/core/management/commands/fix_avatars.py @@ -0,0 +1,16 @@ +import string +from django.core.management.base import BaseCommand +from regluit.core.models import UNGLUEITAR +from regluit.libraryauth.auth import pic_storage_url + +from regluit.core import models + +class Command(BaseCommand): + help = "fix avatar urls and settings" + + def handle(self, **options): + for profile in models.UserProfile.objects.exclude(pic_url=''): + self.stdout.write("updating user %s" % profile.user) + if not profile.pic_url.startswith('https://unglueit'): + profile.pic_url = pic_storage_url(profile.user, 'twitter', profile.pic_url) + profile.save() diff --git a/core/management/commands/fix_covers.py b/core/management/commands/fix_covers.py new file mode 100644 index 000000000..942a796d7 --- /dev/null +++ b/core/management/commands/fix_covers.py @@ -0,0 +1,106 @@ +from django.core.management.base import BaseCommand + +import re +import requests +from regluit.core.models import Edition, Work +from regluit.core.loaders.doab import store_doab_cover + +to_fix = [ 
+"20.500.12854/88675", +"20.500.12854/88677", +"20.500.12854/88678", +"20.500.12854/88679", +"20.500.12854/88680", +"20.500.12854/88681", +"20.500.12854/88683", +"20.500.12854/88686", +"20.500.12854/88687", +"20.500.12854/89178", +"20.500.12854/89252", +"20.500.12854/89255", +"20.500.12854/89257", +"20.500.12854/89260", +"20.500.12854/89265", +"20.500.12854/89441", +"20.500.12854/89490", +"20.500.12854/89496", +"20.500.12854/89498", +"20.500.12854/89514", +"20.500.12854/91350", +"20.500.12854/96212", +] +missing = [ +] + +class Command(BaseCommand): + """ To repair covers, will need a new refresh_cover method""" + help = "fix bad covers" + + def add_arguments(self, parser): + parser.add_argument('doab', nargs='?', default='', help="doab to fix") + + def handle(self, doab, **options): + if doab == 'mangled': + self.fix_mangled_covers() + elif doab == 'list': + for doab_id in to_fix: + self.fix_doab_cover(doab_id) + return True + elif doab == 'null': + no_cover_doab = Work.objects.filter(identifiers__type='doab').exclude(editions__cover_image__isnull=False) + for work in no_cover_doab: + cover_url = self.refresh_cover(work.doab) + if cover_url: + for e in work.editions.all(): + e.cover_image = cover_url + e.save() + self.stdout.write(f'added cover for {work.doab}') + else: + return self.fix_doab_cover(doab) + return False + + def fix_doab_cover(self, doab): + eds = Edition.objects.filter(cover_image__contains=doab) + + cover_url = self.refresh_cover(doab) + if cover_url: + for e in eds: + e.cover_image = cover_url + e.save() + if e.cover_image_small() and e.cover_image_thumbnail(): + self.stdout.write('fixed %s using %s' % (doab, cover_url)) + else: + self.stdout.write('bad thumbnails for %s' % cover_url) + return False + return True + self.stdout.write('removing bad cover for %s' % doab) + + for e in eds: + e.cover_image = None + e.save() + return False + + def fix_mangled_covers(self): + eds = Edition.objects.filter(cover_image__contains='amazonaws.comdoab') + for 
ed in eds: + cover_url = ed.cover_image.replace('amazonaws.comdoab', 'amazonaws.com/doab') + ed.cover_image = cover_url + ed.save() + self.stdout.write('fixed %s mangled covers' % eds.count()) + eds = Edition.objects.exclude(cover_image__startswith='http').filter(cover_image__regex='.') + for ed in eds: + ed.cover_image = '' + ed.save() + self.stdout.write('fixed %s file covers' % eds.count()) + fixed = 0 + for cover in missing: + eds = Edition.objects.filter(cover_image=cover) + for ed in eds: + ed.cover_image = '' + ed.save() + fixed += 1 + self.stdout.write('fixed %s file covers' % fixed) + + def refresh_cover(self, doab): + new_cover, created = store_doab_cover(doab, redo=True) + return new_cover diff --git a/core/management/commands/fix_inactive.py b/core/management/commands/fix_inactive.py new file mode 100644 index 000000000..17f1c4538 --- /dev/null +++ b/core/management/commands/fix_inactive.py @@ -0,0 +1,23 @@ +import re + +from django.core.management.base import BaseCommand + +from django.db.models import Sum + +from regluit.core.models import Work, Ebook +from regluit.core.loaders.harvest import DOWNLOADABLE + + +class Command(BaseCommand): + help = "fix inactive Ebooks" + + def handle(self, **options): + + qs = Work.objects.annotate(num_free=Sum('editions__ebook_files')).filter(num_free__gt=0) + self.stdout.write(str(qs.filter(is_free=False).count())) + for free in qs.filter(is_free=False): + for ebook in Ebook.objects.filter(edition__work_id=free.id, format__in=DOWNLOADABLE).order_by('-created'): + ebook.activate() + break + self.stdout.write(str(qs.filter(is_free=False).count())) + diff --git a/core/management/commands/fix_mdpi.py b/core/management/commands/fix_mdpi.py new file mode 100644 index 000000000..79a13207b --- /dev/null +++ b/core/management/commands/fix_mdpi.py @@ -0,0 +1,75 @@ +import re + +from django.core.management.base import BaseCommand + +from regluit.core.models import Ebook, EbookFile + + +class Command(BaseCommand): + help = "fix 
mdpi Ebooks" + + def handle(self, **options): + mdpi_match = re.compile(r'https://res.mdpi.com/bookfiles/book/(\d+)(.*)\?v=\d+') + + mdpi_ebs = Ebook.objects.filter(url__startswith='https://res.mdpi.com/bookfiles/book/', url__contains="?v=") + mdpi_ebfs = EbookFile.objects.filter(source__startswith='https://res.mdpi.com/bookfiles/book/', source__contains="?v=") + self.stdout.write('Ebooks %s, Ebook Files %s' % (mdpi_ebs.count(), mdpi_ebfs.count())) + + done = [] + for ebf in mdpi_ebfs.order_by('-created'): + match_ebf = mdpi_match.match(ebf.source) + if match_ebf: + bookno = match_ebf.group(1) + if bookno in done: + continue + else: + done.append(bookno) + stem = ebf.source.split('?')[0] + online_url = 'https://www.mdpi.com/books/pdfview/book/' + bookno + size = ebf.ebook.filesize + + # change the ebook provider to unglue.it + if ebf.ebook.provider != 'Unglue.it': + ebf.ebook.provider = 'Unglue.it' + ebf.ebook.url = ebf.file.url + ebf.ebook.active = True + ebf.ebook.save() + + # create the online ebook that should have been + online=Ebook.objects.get_or_create(format='online', url=online_url, edition=ebf.edition, + active=False, rights=ebf.ebook.rights, provider='MDPI Books') + + # reset ebf source + ebf.source = online_url + ebf.save() + + # check for duplicate ebfs + for old_ebook in mdpi_ebs.filter(url__contains='/' + bookno + '/').exclude(id=ebf.id).order_by('-created'): + old_ebook.active = False + for oldebf in old_ebook.ebook_files.exclude(id=ebf.id): + if oldebf.file != ebf.file: + # save storage by deleting redundant files + oldebf.file.delete() + oldebf.file = ebf.file + oldebf.source = ebf.source.split('?')[0] + oldebf.save() + old_ebook.save() + + # now make the rest of the ebooks onlines + done = [] + for eb in mdpi_ebs.filter(active=True): + match_eb = mdpi_match.match(eb.url) + if match_eb: + # make sure not already harvested + if eb.ebook_files.count(): + self.stdout.write('ebook %s already harvested' % eb.id) + continue + bookno = match_eb.group(1) 
+ eb.active = False + if bookno in done: + eb.active = False + eb.url = eb.url.split('?')[0] + eb.save() + + + diff --git a/core/management/commands/fix_ol_descriptions.py b/core/management/commands/fix_ol_descriptions.py index 4564736e1..0683c999d 100644 --- a/core/management/commands/fix_ol_descriptions.py +++ b/core/management/commands/fix_ol_descriptions.py @@ -6,8 +6,8 @@ class Command(BaseCommand): help = "do OL relookup if description contains { " def handle(self, **options): - print "Number of Works with { in description: %s" % models.Work.objects.filter(description__contains='{').count() + self.stdout.write("Number of Works with { in description: %s" % models.Work.objects.filter(description__contains='{').count()) for work in models.Work.objects.filter(description__contains='{'): - print "updating work %s" % work + self.stdout.write("updating work %s" % work) bookloader.add_openlibrary(work, hard_refresh = True) diff --git a/core/management/commands/fix_online_ebooks.py b/core/management/commands/fix_online_ebooks.py new file mode 100644 index 000000000..2f8a21031 --- /dev/null +++ b/core/management/commands/fix_online_ebooks.py @@ -0,0 +1,27 @@ +from django.core.management.base import BaseCommand + +from regluit.core.loaders.doab_utils import online_to_download +from regluit.core.models import Ebook + +class Command(BaseCommand): + help = "deactivate dead oapen ebooks" + args = "" + + def add_arguments(self, parser): + parser.add_argument('limit', nargs='?', type=int, default=0, help="max to fix") + + def handle(self, limit=0, **options): + limit = int(limit) if limit else 0 + onlines = Ebook.objects.filter(active=1, provider='OAPEN Library', + url__contains='/download/') + done = 0 + for online in onlines: + online.active = False + online.save() + done += 1 + #self.stdout.write(online.edition.work.title) + if done > limit: + break + self.stdout.write('fixed {} ebooks'.format(done)) + if done >= 1000: + self.stdout.write('1000 is the maximum; repeat to do 
more') diff --git a/core/management/commands/fix_stuff.py b/core/management/commands/fix_stuff.py new file mode 100644 index 000000000..f169f6277 --- /dev/null +++ b/core/management/commands/fix_stuff.py @@ -0,0 +1,23 @@ +from django.core.management.base import BaseCommand + +from regluit.core.models import EbookFile, Ebook +from regluit.core.loaders.soup import get_soup + +class Command(BaseCommand): + + def handle(self, **options): + for ebf in EbookFile.objects.filter(ebook__isnull=True, source__isnull=False): + ebf.delete() + for ebf in EbookFile.objects.filter(ebook__filesize=0): + try: + ebf.ebook.filesize = ebf.file.size + ebf.ebook.save() + except: + pass + for ebf in EbookFile.objects.filter(ebook__filesize__isnull=True): + try: + ebf.ebook.filesize = ebf.file.size + ebf.ebook.save() + except: + pass + diff --git a/core/management/commands/fix_twitter_avatars.py b/core/management/commands/fix_twitter_avatars.py deleted file mode 100644 index 520513093..000000000 --- a/core/management/commands/fix_twitter_avatars.py +++ /dev/null @@ -1,16 +0,0 @@ -import string -from django.core.management.base import BaseCommand -from regluit.core.models import TWITTER - -from regluit.core import models - -class Command(BaseCommand): - help = "fix old twitter avatar urls" - - def handle(self, **options): - print "Number of users affected with : %s" % models.UserProfile.objects.filter( pic_url__contains='//si0.twimg.com').count() - - for profile in models.UserProfile.objects.filter(pic_url__contains='//si0.twimg.com'): - print "updating user %s" % profile.user - profile.pic_url = string.replace( profile.pic_url, '//si0.twimg.com','//pbs.twimg.com') - profile.save() diff --git a/core/management/commands/goodreads_books.py b/core/management/commands/goodreads_books.py deleted file mode 100644 index 49c88b51c..000000000 --- a/core/management/commands/goodreads_books.py +++ /dev/null @@ -1,16 +0,0 @@ -from itertools import islice - -from django.conf import settings -from 
django.core.management.base import BaseCommand - -from regluit.core.goodreads import GoodreadsClient - -class Command(BaseCommand): - help = "list books on given user bookshelf" - args = "" - - def handle(self, user_id, shelf_name, max_books, **options): - max_books = int(max_books) - gc = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET) - for (i, review) in enumerate(islice(gc.review_list(user_id,shelf=shelf_name),max_books)): - print i, review["book"]["title"], review["book"]["isbn10"], review["book"]["small_image_url"] \ No newline at end of file diff --git a/core/management/commands/goodreads_load_books.py b/core/management/commands/goodreads_load_books.py deleted file mode 100644 index 0a61f1c0b..000000000 --- a/core/management/commands/goodreads_load_books.py +++ /dev/null @@ -1,19 +0,0 @@ -from django.conf import settings -from django.contrib.auth.models import User -from django.core.management.base import BaseCommand - -from regluit.core import tasks, bookloader -from regluit.core.goodreads import GoodreadsClient - -#from regluit.core.goodreads import load_shelf_into_wishlist - -class Command(BaseCommand): - help = "list books on given user bookshelf" - args = "" - - def handle(self, user_name, goodreads_user_id, shelf_name, max_books, **options): - - user = User.objects.get(username=user_name) - max_books = int(max_books) - - tasks.load_goodreads_shelf_into_wishlist.delay(user.id, shelf_name, goodreads_user_id, max_books) \ No newline at end of file diff --git a/core/management/commands/harvest_manual_ebooks.py b/core/management/commands/harvest_manual_ebooks.py new file mode 100644 index 000000000..a5a7de89b --- /dev/null +++ b/core/management/commands/harvest_manual_ebooks.py @@ -0,0 +1,30 @@ +from random import shuffle +from django.core.management.base import BaseCommand + +from regluit.core.loaders.harvest import harvest_manual +from regluit.core.models import Ebook + +class Command(BaseCommand): + help = "load 
manually harvested ebooks" + + def add_arguments(self, parser): + parser.add_argument('--ebook', nargs='?', type=int, default=0, help="ebook to harvest") + parser.add_argument('--provider', nargs='?', default='', help="provider to harvest") + + def handle(self, limit=0, trace=False, **options): + if options.get('ebook'): + onlines = Ebook.objects.filter(id=options.get('ebook')) + elif options.get('provider'): + onlines = Ebook.objects.filter(provider=options.get('provider')) + self.stdout.write('%s onlines to check' % onlines.count()) + done = 0 + providers = {} + + for online in onlines: + new_ebf, new = harvest_manual(online) + if new_ebf and new: + done += new + providers[online.provider] = providers.get(online.provider, 0) + 1 + self.stdout.write(new_ebf.edition.work.title) + self.stdout.write('harvested {} ebooks'.format(done)) + self.stdout.write(str(providers)) diff --git a/core/management/commands/harvest_online_ebooks.py b/core/management/commands/harvest_online_ebooks.py index 06aeeab91..85fcd71a9 100644 --- a/core/management/commands/harvest_online_ebooks.py +++ b/core/management/commands/harvest_online_ebooks.py @@ -1,21 +1,52 @@ +from random import shuffle from django.core.management.base import BaseCommand -from regluit.core.loaders.utils import dl_online +from regluit.core.loaders.harvest import dl_online, rl, CMPPROVIDERS from regluit.core.models import Ebook class Command(BaseCommand): help = "harvest downloadable ebooks from 'online' ebooks" args = "" - - def handle(self, limit=0, **options): + + def add_arguments(self, parser): + parser.add_argument('limit', nargs='?', type=int, default=0, help="max to harvest") + parser.add_argument('--ebook', nargs='?', type=int, default=0, help="ebook to harvest") + parser.add_argument('--provider', nargs='?', default='', help="provider to harvest") + parser.add_argument('--format', nargs='?', default='online', help="format to harvest") + parser.add_argument('--trace', action='store_true', help="trace") + + 
def handle(self, limit=0, trace=False, **options): limit = int(limit) if limit else 0 - onlines = Ebook.objects.filter(format='online') + #rl = RateLimiter() + format = options.get('format') + if options.get('ebook'): + onlines = Ebook.objects.filter(id=options.get('ebook')) + elif options.get('provider'): + provider = options.get('provider') + if provider == 'CMPPROVIDERS': + onlines = Ebook.objects.filter(provider__in=CMPPROVIDERS) + else: + onlines = Ebook.objects.filter(provider=provider, format=format) + self.stdout.write('%s onlines to check' % onlines.count()) + else: + online_ids = [ebook.id for ebook in Ebook.objects.filter(format=format)] + self.stdout.write('%s onlines to check' % len(online_ids)) + shuffle(online_ids) + onlines = (Ebook.objects.get(id=id) for id in online_ids) done = 0 + providers = {} + for online in onlines: - new_ebf, new = dl_online(online) + if trace: + self.stdout.write(str(online.id)) + new_ebf, new = dl_online(online, limiter=rl.delay, format=format) if new_ebf and new: - done += 1 - if done > limit: + done += new + providers[online.provider] = providers.get(online.provider, 0) + 1 + self.stdout.write(new_ebf.edition.work.title) + if done >= limit or done >= 500: break - print 'harvested {} ebooks'.format(done) - + self.stdout.write('harvested {} ebooks'.format(done)) + self.stdout.write(str(providers)) + if done >= 500: + self.stdout.write('500 is the maximum; repeat to do more') diff --git a/core/management/commands/harvest_remote_ebooks.py b/core/management/commands/harvest_remote_ebooks.py new file mode 100644 index 000000000..afa1e9209 --- /dev/null +++ b/core/management/commands/harvest_remote_ebooks.py @@ -0,0 +1,63 @@ +from random import shuffle +from django.core.management.base import BaseCommand + +from regluit.core.loaders.harvest import archive_dl, RateLimiter, DONT_HARVEST +from regluit.core.models import Ebook +from regluit.core.parameters import GOOD_PROVIDERS +DOWNLOADABLE = ['pdf', 'epub'] + +DONT_CHECK = 
list(GOOD_PROVIDERS) + DONT_HARVEST + +class Command(BaseCommand): + help = "check/harvest ebooks from 'remote' ebooks" + args = "" + + def add_arguments(self, parser): + parser.add_argument('limit', nargs='?', type=int, default=0, help="max to harvest") + parser.add_argument('--ebook', nargs='?', type=int, default=0, help="ebook to harvest") + parser.add_argument('--provider', nargs='?', default='', help="provider to harvest") + parser.add_argument('--format', nargs='?', default='all', help="format to harvest") + parser.add_argument('--trace', action='store_true', help="trace") + + def handle(self, limit=0, trace=False, **options): + limit = int(limit) if limit else 0 + rl = RateLimiter() + format = options.get('format') + if format == 'all': + onlines = Ebook.objects.filter(format__in=DOWNLOADABLE) + else: + onlines = Ebook.objects.filter(format=format) + if options.get('ebook'): + onlines = Ebook.objects.filter(id=options.get('ebook')) + elif options.get('provider'): + onlines = onlines.filter(provider=options.get('provider')) + else: + onlines = onlines.exclude(provider__in=DONT_CHECK) + online_ids = [ebook.id for ebook in onlines] + self.stdout.write('%s ebooks need checking.' 
% len(onlines)) + shuffle(online_ids) + onlines = (Ebook.objects.get(id=id) for id in online_ids) + archived = {} + failed = {} + done = 0 + for online in onlines: + if trace: + self.stdout.write(str(online.id)) + status = archive_dl(online, limiter=rl.delay) + if status == 1: + done += 1 + archived[online.provider] = archived.get(online.provider, 0) + 1 + self.stdout.write(online.edition.title) + elif status == -1: + done += 1 + failed[online.provider] = failed.get(online.provider, 0) + 1 + online.format = 'online' + online.active = False + online.save() + if done >= limit or done >= 2000: + break + self.stdout.write("archived") + for result in [archived, failed]: + for provider in result: + self.stdout.write('%s\t%s' % (provider, result[provider])) + self.stdout.write("failed") diff --git a/core/management/commands/initialize_pledge_badges.py b/core/management/commands/initialize_pledge_badges.py deleted file mode 100644 index a1849e84b..000000000 --- a/core/management/commands/initialize_pledge_badges.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -set the 'pledged' badge for people who've pledged -""" - -from django.core.management.base import BaseCommand -from django.contrib.auth.models import User - -from regluit.core.models import Badge -from regluit.payment.models import Transaction - -class Command(BaseCommand): - help = "for people who've pledged, give them a badge!" 
- - - def handle(self, **options): - pledger= Badge.objects.get(name='pledger') - pledger2= Badge.objects.get(name='pledger2') - print 'start' - print 'pledger badges: %s' % pledger.holders.all().count() - print 'pledger2 badges: %s' % pledger2.holders.all().count() - pledges=Transaction.objects.exclude(status='NONE').exclude(status='Canceled',reason=None).exclude(anonymous=True) - for pledge in pledges: - if pledge.user.profile.badges.all().count(): - if pledge.user.profile.badges.all()[0].id == pledger.id: - pledge.user.profile.badges.remove(pledger) - pledge.user.profile.badges.add(pledger2) - else: - pledge.user.profile.badges.add(pledger) - print 'end' - print 'pledger badges: %s' % pledger.holders.all().count() - print 'pledger2 badges: %s' % pledger2.holders.all().count() - - - - diff --git a/core/management/commands/librarything_load_books.py b/core/management/commands/librarything_load_books.py deleted file mode 100644 index b43e72439..000000000 --- a/core/management/commands/librarything_load_books.py +++ /dev/null @@ -1,16 +0,0 @@ -from django.conf import settings -from django.contrib.auth.models import User -from django.core.management.base import BaseCommand - -from regluit.core import librarything, tasks - -class Command(BaseCommand): - help = "load Librarything books into wishlist" - args = "" - - def handle(self, user_name, lt_username, max_books, **options): - - user = User.objects.get(username=user_name) - max_books = int(max_books) - - tasks.load_librarything_into_wishlist.delay(user.id, lt_username, max_books) \ No newline at end of file diff --git a/core/management/commands/librarything_load_books_2.py b/core/management/commands/librarything_load_books_2.py deleted file mode 100644 index e8cffc802..000000000 --- a/core/management/commands/librarything_load_books_2.py +++ /dev/null @@ -1,16 +0,0 @@ -from django.conf import settings -from django.contrib.auth.models import User -from django.core.management.base import BaseCommand - -from 
regluit.core import librarything, tasks - -class Command(BaseCommand): - help = "load Librarything books into wishlist" - args = "" - - def handle(self, lt_username, **options): - - lt = librarything.LibraryThing(username=lt_username) - for (i, book) in enumerate(lt.parse_user_catalog(view_style=5)): - print i, book["title"], book["isbn"], book["work_id"], book["book_id"] - \ No newline at end of file diff --git a/core/management/commands/list_editions.py b/core/management/commands/list_editions.py deleted file mode 100644 index ff1f93794..000000000 --- a/core/management/commands/list_editions.py +++ /dev/null @@ -1,11 +0,0 @@ -from django.core.management.base import BaseCommand - -from regluit.core import models - -class Command(BaseCommand): - help = "list all editions in the database" - - def handle(self, *args, **options): - editions = models.Edition.objects.all() - for edition in editions: - print edition.id, edition.title, edition.isbn_10, edition.isbn_13 diff --git a/core/management/commands/list_queued_notices.py b/core/management/commands/list_queued_notices.py deleted file mode 100644 index 5f40bd0af..000000000 --- a/core/management/commands/list_queued_notices.py +++ /dev/null @@ -1,11 +0,0 @@ -import pickle - -import notification -from django.core.management.base import BaseCommand - -class Command(BaseCommand): - help = "Displays currently queues notices from django-notification" - - def handle(self, **options): - for (i, queued_batch) in enumerate(notification.models.NoticeQueueBatch.objects.all()): - print i, queued_batch.id, pickle.loads(str(queued_batch.pickled_data).decode("base64")) diff --git a/core/management/commands/load_books.py b/core/management/commands/load_books.py index 57faa7d94..b530ec1fe 100644 --- a/core/management/commands/load_books.py +++ b/core/management/commands/load_books.py @@ -4,13 +4,14 @@ class Command(BaseCommand): help = "load books based on a text file of ISBNs" - args = "" + def add_arguments(self, parser): + 
parser.add_argument('filename', nargs='+', help="filename") def handle(self, filename, **options): for isbn in open(filename): isbn = isbn.strip() edition = bookloader.add_by_isbn(isbn) if edition: - print "loaded %s as %s" % (isbn, edition) + self.stdout.write("loaded %s as %s" % (isbn, edition)) else: - print "failed to load book for %s" % isbn + self.stdout.write("failed to load book for %s" % isbn) diff --git a/core/management/commands/load_books_from_onix_csv.py b/core/management/commands/load_books_from_onix_csv.py index 66610f544..05538bcf0 100644 --- a/core/management/commands/load_books_from_onix_csv.py +++ b/core/management/commands/load_books_from_onix_csv.py @@ -5,9 +5,10 @@ class Command(BaseCommand): help = "load books based on a csv spreadsheet of onix data" - args = "" + def add_arguments(self, parser): + parser.add_argument('filename', nargs='+', help="filename") def handle(self, filename, **options): sheetreader= UnicodeDictReader(open(filename,'rU'), dialect=csv.excel) load_from_books(sheetreader) - print "finished loading" + self.stdout.write("finished loading") diff --git a/core/management/commands/load_books_from_onix_tsv.py b/core/management/commands/load_books_from_onix_tsv.py index dfa82aa2d..d6eb2f0b7 100644 --- a/core/management/commands/load_books_from_onix_tsv.py +++ b/core/management/commands/load_books_from_onix_tsv.py @@ -5,9 +5,10 @@ class Command(BaseCommand): help = "load books based on a csv spreadsheet of onix data" - args = "" + def add_arguments(self, parser): + parser.add_argument('filename', nargs='+', help="filename") def handle(self, filename, **options): sheetreader= UnicodeDictReader(open(filename,'rU'), dialect=csv.excel_tab) load_from_books(sheetreader) - print "finished loading" + pself.stdout.write("finished loading") diff --git a/core/management/commands/load_books_from_sitemap.py b/core/management/commands/load_books_from_sitemap.py index dcc886ad3..5668472c8 100644 --- 
a/core/management/commands/load_books_from_sitemap.py +++ b/core/management/commands/load_books_from_sitemap.py @@ -35,6 +35,19 @@ def handle(self, url, max=None, **options): if max and max < 0: break else: - books = add_by_sitemap(url, maxnum=max) + books = add_by_sitemap(url, maxnum=max) + + for edition in books: + done_fmt = set() + for ebook in edition.work.ebooks_all(): + for fmt in ['pdf', 'epub', 'mobi']: + if ebook.format == fmt: + if fmt not in done_fmt: + ebook.activate() + done_fmt.add(fmt) + else: + ebook.deactivate() + + - print "loaded {} books".format(len(books)) + self.stdout.write("loaded {} books".format(len(books))) diff --git a/core/management/commands/load_books_ku.py b/core/management/commands/load_books_ku.py new file mode 100644 index 000000000..58599be83 --- /dev/null +++ b/core/management/commands/load_books_ku.py @@ -0,0 +1,17 @@ +from django.core.management.base import BaseCommand + +from regluit.core.loaders.ku import load_ku, activate_ku_ebooks + +class Command(BaseCommand): + help = "load books from knowledge unlatched" + + def add_arguments(self, parser): + parser.add_argument('round', nargs='?', type=int, default=None, help="round to load") + + + def handle(self, round, **options): + books = load_ku(round) + self.stdout.write("loaded {} books".format(len(books))) + activated = activate_ku_ebooks() + self.stdout.write("activated {} ebooks".format(activated)) + diff --git a/core/management/commands/load_books_pbdata.py b/core/management/commands/load_books_pbdata.py new file mode 100644 index 000000000..4b5fa3b90 --- /dev/null +++ b/core/management/commands/load_books_pbdata.py @@ -0,0 +1,37 @@ +import json +from datetime import datetime + +from django.core.management.base import BaseCommand + +from regluit.core.loaders import add_by_metadata +from regluit.core.loaders.pressbooks import PressbooksScraper + +class Command(BaseCommand): + help = "load books from a json file from pressbooks" + def add_arguments(self, parser): + 
parser.add_argument('filename', help="filename") + parser.add_argument( + '--from', + action='store', + dest='from_date', + default='1-1-2000', + help='only read records after ', + ) + + def handle(self, filename, **options): + with open(filename, 'r') as jsonfile: + pb_metadata = json.load(jsonfile) + self.stdout.write(f'reading {len(pb_metadata)} records') + try: + from_date = datetime.strptime(options['from_date'], '%m-%d-%Y') + except ValueError: + from_date = datetime.strptime('1-1-2000', '%m-%d-%Y') + for record in pb_metadata: + if 'updated' in record: + updated = datetime.strptime(record['updated'], '%m-%d-%Y') + if updated < from_date: + continue + scraper = PressbooksScraper(record['url'], initial=record) + add_by_metadata(scraper.metadata) + + self.stdout.write("finished loading") diff --git a/core/management/commands/load_books_routledge.py b/core/management/commands/load_books_routledge.py new file mode 100644 index 000000000..f05aaa144 --- /dev/null +++ b/core/management/commands/load_books_routledge.py @@ -0,0 +1,11 @@ +from django.core.management.base import BaseCommand + +from regluit.core.loaders.routledge import load_routledge + +class Command(BaseCommand): + help = "load books from routledge" + + def handle(self, **options): + books = load_routledge() + self.stdout.write("loaded {} books".format(len(books))) + diff --git a/core/management/commands/load_books_springer.py b/core/management/commands/load_books_springer.py index a7dba9d81..93980e9a5 100644 --- a/core/management/commands/load_books_springer.py +++ b/core/management/commands/load_books_springer.py @@ -1,12 +1,32 @@ from django.core.management.base import BaseCommand -from regluit.core.loaders.springer import load_springer +from regluit.core.loaders.springer import load_springer, SpringerScraper +from regluit.core.bookloader import add_from_bookdatas class Command(BaseCommand): help = "load books from springer open" - args = " " + def add_arguments(self, parser): + 
parser.add_argument('startpage', nargs='?', type=int, default=1, help="page to start on") + parser.add_argument('endpage', nargs='?', type=int, default=1, help="page to end on") + parser.add_argument('--url', nargs='?', default='', help="url to scrape") + def handle(self, startpage, endpage=0, **options): - books = load_springer(int(startpage), int(endpage)) - print "loaded {} books".format(len(books)) + if options.get('url'): + books = add_from_bookdatas([SpringerScraper(options.get('url'))]) + else: + books = load_springer(int(startpage), int(endpage)) + self.stdout.write("loaded {} books".format(len(books))) + + for edition in books: + done_fmt = set() + for ebook in edition.work.ebooks_all(): + for fmt in ['pdf', 'epub', 'mobi']: + if ebook.format == fmt: + if fmt not in done_fmt: + ebook.activate() + done_fmt.add(fmt) + else: + ebook.deactivate() + diff --git a/core/management/commands/load_by_doab.py b/core/management/commands/load_by_doab.py index beb324838..0df98aca2 100644 --- a/core/management/commands/load_by_doab.py +++ b/core/management/commands/load_by_doab.py @@ -6,7 +6,7 @@ class Command(BaseCommand): help = "load doab books by doab_id via oai" def add_arguments(self, parser): - parser.add_argument('doab_ids', nargs='+', type=int, default=1, help="doab ids to add") + parser.add_argument('doab_ids', nargs='+', default=1, help="doab ids to add") def handle(self, doab_ids, **options): for doab_id in doab_ids: diff --git a/core/management/commands/load_doab.py b/core/management/commands/load_doab.py index 10856e65e..214c76c95 100644 --- a/core/management/commands/load_doab.py +++ b/core/management/commands/load_doab.py @@ -1,18 +1,29 @@ +import datetime from django.core.management.base import BaseCommand from regluit.core.loaders import doab +def timefromiso(datestring): + try: + return datetime.datetime.strptime(datestring, "%Y-%m-%d") + except: + return datetime.datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%S") + class Command(BaseCommand): help = 
"load doab books via oai" - args = " " - - def handle(self, from_year= None, limit=None, **options): - from_year = int(from_year) if from_year else None - limit = int(limit) if limit else None - if limit: - doab.load_doab_oai(from_year=from_year, limit=limit) - else: - if from_year: - doab.load_doab_oai(from_year=from_year) - else: - doab.load_doab_oai() + + def add_arguments(self, parser): + parser.add_argument('from_date', nargs='?', type=timefromiso, + default=None, help="YYYY-MM-DD to start") + parser.add_argument('--until', nargs='?', type=timefromiso, + default=None, help="YYYY-MM-DD to end") + parser.add_argument('--max', nargs='?', type=int, default=None, help="max desired records") + + def handle(self, from_date, **options): + until_date = options['until'] + max = options['max'] + self.stdout.write('starting at date:{} until:{}, max: {}'.format( + from_date, until_date, max)) + records, new_doabs, last_time = doab.load_doab_oai(from_date, until_date, limit=max) + self.stdout.write('loaded {} records ({} new), ending at {}'.format( + records, new_doabs, last_time)) diff --git a/core/management/commands/load_edp.py b/core/management/commands/load_edp.py new file mode 100644 index 000000000..55961052c --- /dev/null +++ b/core/management/commands/load_edp.py @@ -0,0 +1,10 @@ +from django.core.management.base import BaseCommand + +from regluit.core.loaders.multiscrape import edp_scrape + + +class Command(BaseCommand): + help = "load books from edp-open" + + def handle(self, **options): + edp_scrape() diff --git a/core/management/commands/load_gutenberg.py b/core/management/commands/load_gutenberg.py deleted file mode 100644 index e99891f82..000000000 --- a/core/management/commands/load_gutenberg.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Load the Gutenberg editions - -""" - -from django.core.management.base import BaseCommand - -from regluit.core import models -from regluit.test import booktests - -class Command(BaseCommand): - help = "load Gutenberg editions" - 
args = "" - - def handle(self, max_num, **options): - - try: - max_num = int(max_num) - except: - max_num = None - - print "number of Gutenberg editions (before)", \ - models.Edition.objects.filter(identifiers__type='gtbg').count() - print "number of Gutenberg ebooks (before)", \ - models.Ebook.objects.filter(edition__identifiers__type='gtbg').count() - - booktests.load_gutenberg_books(max_num=max_num) - - print "number of Gutenberg editions (after)", \ - models.Edition.objects.filter(identifiers__type='gtbg').count() - print "number of Gutenberg ebooks (after)", \ - models.Ebook.objects.filter(edition__identifiers__type='gtbg').count() - diff --git a/core/management/commands/load_wishlist.py b/core/management/commands/load_wishlist.py deleted file mode 100644 index 14f452ca7..000000000 --- a/core/management/commands/load_wishlist.py +++ /dev/null @@ -1,21 +0,0 @@ -from django.core.management.base import BaseCommand -from django.contrib.auth.models import User - -from regluit.core import bookloader - -class Command(BaseCommand): - help = "populate a user's wishlist with books from a file of isbns" - args = " " - - def handle(self, filename, username, **options): - user = User.objects.get(username=username) - wishlist = user.wishlist - for isbn in open(filename): - isbn = isbn.strip() - edition = bookloader.add_by_isbn(isbn) - if edition: - bookloader.add_related(isbn) - user.wishlist.add_work(edition.work, source="user") - print "loaded %s as %s for %s" % (isbn, edition, user) - else: - print "failed to load book for %s" % isbn diff --git a/core/management/commands/make_missing_mobis.py b/core/management/commands/make_missing_mobis.py deleted file mode 100644 index 562aa9ccb..000000000 --- a/core/management/commands/make_missing_mobis.py +++ /dev/null @@ -1,44 +0,0 @@ -from django.core.management.base import BaseCommand -from regluit.core.models import Work, EbookFile - - -class Command(BaseCommand): - help = "generate mobi ebooks where needed and possible." 
- - def add_arguments(self, parser): - parser.add_argument('max', nargs='?', type=int, default=1, help="maximum mobis to make") - parser.add_argument('--reset', '-r', action='store_true', help="reset failed mobi conversions") - - - def handle(self, max=None, **options): - maxbad = 10 - if options['reset']: - bads = EbookFile.objects.filter(mobied__lt=0) - for bad in bads: - bad.mobied = 0 - bad.save() - - epubs = Work.objects.filter(editions__ebooks__format='epub').distinct().order_by('-id') - - i = 0 - n_bad = 0 - for work in epubs: - if not work.ebooks().filter(format="mobi"): - for ebook in work.ebooks().filter(format="epub"): - ebf = ebook.get_archive_ebf() - if ebf and ebf.mobied >= 0: - try: - print u'making mobi for {}'.format(work.title) - if ebf.make_mobi(): - print 'made mobi' - i += 1 - break - else: - self.stdout.write('failed to make mobi') - n_bad += 1 - - except: - self.stdout.write('failed to make mobi') - n_bad += 1 - if i >= max or n_bad >= maxbad: - break diff --git a/core/management/commands/no_isbn_works.py b/core/management/commands/no_isbn_works.py deleted file mode 100644 index 00921b500..000000000 --- a/core/management/commands/no_isbn_works.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -list works with no isbn -""" - -from django.core.management.base import BaseCommand -from django.db.models import Count - -from regluit.core import models - -class Command(BaseCommand): - help = "list works with no isbn. 
actions: count, list, wished" - args = "" - - - def handle(self, action='count', **options): - no_isbn_works=models.Work.objects.exclude(identifiers__type='isbn') - num=no_isbn_works.count() - print "%s works without isbn:"% num - if action=='list': - for work in no_isbn_works: - print "%s, %s"% (work.id, work.title) - elif action=='wished': - print "%s wished works without isbn:"% no_isbn_works.filter(num_wishes__gt=0).count() - for work in no_isbn_works.filter(num_wishes__gt=0): - print "%s, %s, %s"% (work.id, work.title, work.num_wishes) - - diff --git a/core/management/commands/old_campaign_stats.py b/core/management/commands/old_campaign_stats.py deleted file mode 100644 index 62ffa4db4..000000000 --- a/core/management/commands/old_campaign_stats.py +++ /dev/null @@ -1,59 +0,0 @@ -from django.core.management.base import BaseCommand -from django.db.models import Q, F, Count, Sum - -from regluit.core.models import Campaign - -STATS_TEMPLATE = """Total Pledged: {0} by {1} Pledgers - -Distribution of Pledges: - -{2} - -Premiums Offered: - -{3} - -Premiums Selected: - -{4} - -Number of Transactions without premiums selected: {5}""" - -def campaign_stats(c): - # Use aggregations: https://docs.djangoproject.com/en/dev/topics/db/aggregation/#cheat-sheet - - transactions = c.transaction_set.filter(Q(status='Canceled') & Q(reason ='Amazon FPS shutdown')) - - amount_sum = transactions.aggregate(Sum('amount'))['amount__sum'] - number_pledgers = transactions.count() - - # do we have unique - - amount_table = "Level\tCount\tTotal\n" + "\n".join(["{0}\t{1}\t{2}".format(k['amount'], k['count_amount'], k['amount']*k['count_amount']) for k in transactions.values('amount').annotate(count_amount=Count('amount')).order_by('-amount')]) - - # premiums offered - - premiums_offered = "id\tamount\tdescription\tcampaign_id\n" + "\n".join(["{0}\t{1}\t{2}\t{3}".format(p.id, p.amount, p.description, p.campaign_id) for p in c.effective_premiums()]) - - transactions_null_premiums_count = 
transactions.filter(premium__isnull=True).count() - -# list stats around premiums - - premium_selected = "Amount\tCount\tPrem. id\tDescription\n" + \ - "\n".join(["{0}\t{1}\t{2}\t{3}".format(k['premium__amount'], k['count_premium'], - k['premium'], k['premium__description']) for k in - transactions.filter(premium__isnull=False).values('premium', - 'premium__description', 'premium__amount').annotate(count_premium=Count( 'premium')).order_by('premium__amount')]) - - return(STATS_TEMPLATE.format(amount_sum, number_pledgers, amount_table, premiums_offered, premium_selected, transactions_null_premiums_count)) - -class Command(BaseCommand): - help = "Displays data about old campaigns" - # args = " " - - def handle(self, **options): - - # Melinda's campaign - c6 = Campaign.objects.get(id=6) - print campaign_stats(c6) - diff --git a/core/management/commands/random_campaigns.py b/core/management/commands/random_campaigns.py deleted file mode 100644 index 163b6a415..000000000 --- a/core/management/commands/random_campaigns.py +++ /dev/null @@ -1,48 +0,0 @@ -from datetime import timedelta -from decimal import Decimal as D -from random import randint, randrange - -from django.conf import settings -from django.core.management.base import BaseCommand -from django.utils.timezone import now - -from regluit.core.models import Work, Campaign - -class Command(BaseCommand): - help = "creates random campaigns for any works that lack one for testing" - - def handle(self, *args, **options): - for work in Work.objects.all(): - if work.campaigns.all().count() > 0: - continue - campaign = Campaign() - campaign.name = work.title - campaign.work = work - campaign.description = "Test Campaign" - - # random campaign target between $200 and $10,000 - campaign.target = D(randint(200,10000)) - - # add a test rightsholder recipient right now - campaign.paypal_receiver = settings.PAYPAL_TEST_RH_EMAIL - - # random deadline between 5 days from now and 180 days from now - _now = now() - 
campaign.deadline = random_date(_now + timedelta(days=5), - _now + timedelta(days=180)) - - # randomly activate some of the campaigns - coinflip = D(randint(0,10)) - if coinflip > 5: - campaign.activate() - - campaign.save() - print "campaign %s...status: %s" % (unicode(campaign).encode('ascii','replace') , campaign.status) - - -def random_date(start, end): - delta = end - start - int_delta = (delta.days * 24 * 60 * 60) + delta.seconds - random_second = randrange(int_delta) - return (start + timedelta(seconds=random_second)) - diff --git a/core/management/commands/random_tasks.py b/core/management/commands/random_tasks.py deleted file mode 100644 index 98246ffd2..000000000 --- a/core/management/commands/random_tasks.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -a command that creates a given number of random tasks to test out celery -""" - -import random - -from django.core.management.base import BaseCommand - -from regluit.core import tasks -from regluit.core.models import CeleryTask - -random.seed() - -class Command(BaseCommand): - help = "create random tasks" - args = "" - - def handle(self, num_tasks, action, **options): - """ - actions: - - c: create num_tasks tasks - s: print state of existing tasks - d: delete all tasks - an integer: compute factorial of the integer -- can then follow up with s to find the state - """ - import django - django.db.transaction.enter_transaction_management() - if action == 'c': - for i in xrange(int(num_tasks)): - n = random.randint(1,1000) - task_id = tasks.fac.delay(n) - - ct = CeleryTask() - ct.task_id = task_id - ct.function_name = 'fac' - ct.function_args = n - ct.description = "Factorial of %d" % (n) - ct.save() - elif action == 's': - for (i, ct) in enumerate(CeleryTask.objects.all()): - print i, ct.function_args, ct.state, ct.info - elif action == 'd': - CeleryTask.objects.all().delete() - else: - try: - action = int(action) - print 'action: %d' % (int(action)) - task_id = tasks.fac.delay(int(action),sleep_interval=0.001) - - 
ct = CeleryTask() - ct.task_id = task_id - ct.function_name = 'fac' - ct.function_args = action - ct.description = "Factorial of %d" % (action) - ct.save() - except Exception, e: - print e - django.db.transaction.commit() \ No newline at end of file diff --git a/core/management/commands/random_wishlists.py b/core/management/commands/random_wishlists.py deleted file mode 100644 index eb61b75cc..000000000 --- a/core/management/commands/random_wishlists.py +++ /dev/null @@ -1,20 +0,0 @@ -from django.contrib.auth.models import User -from django.core.management.base import BaseCommand - -from regluit.core.models import Work - -class Command(BaseCommand): - help = "creates random wishlists for any users" - - def handle(self, *args, **options): - for user in User.objects.all(): - print user - try: - if user.wishlist.works.all().count() != 0: - continue - for work in Work.objects.all(): - print "adding %s to %s's wishlist" % (work, user) - user.wishlist.add_work(work, 'random') - except Exception, e: - print e - pass diff --git a/core/management/commands/recluster_singletons.py b/core/management/commands/recluster_singletons.py index b8ee07a18..e41f2cd55 100644 --- a/core/management/commands/recluster_singletons.py +++ b/core/management/commands/recluster_singletons.py @@ -14,39 +14,56 @@ class Command(BaseCommand): help = "add and merge editions for singleton works" args = " " + def add_arguments(self, parser): + parser.add_argument('language', nargs='+', help="language code") + parser.add_argument('max', nargs='?', type=int, default=100, help="max singletons to process") + parser.add_argument('start', nargs='?', type=int, default=0, help="start") def handle(self, language, max=100, start=0, **options): - print "Number of singleton Works with language = %s: %s" % (language, models.Work.objects.annotate(num_editions=Count('editions')).filter(num_editions=1, language=language).count()) + self.stdout.write("Number of singleton Works with language = %s: %s" % (
language, + models.Work.objects.annotate( + num_editions=Count('editions')).filter(num_editions=1, language=language).count() + ) + ) - try: - max = int(max) - except: - max = 100 - try: - start = int(start) - except: - start = 0 - - for (i, work) in enumerate(islice(models.Work.objects.annotate(num_editions=Count('editions')).filter(num_editions=1, language=language),start,start+max)): + for (i, work) in enumerate(islice( + models.Work.objects.annotate( + num_editions=Count('editions')).filter(num_editions=1, language=language), + start, + start + max + ) + ): #check that there's still only one edition - print "%d %s id:%s #editions:%d #isbn:%s -->" % (i, work.title.encode('ascii','ignore'), work.id, work.editions.count(), work.first_isbn_13()), - work_id=work.id + self.stdout.write("%d %s id:%s #editions:%d #isbn:%s -->" % ( + i, + work.title.encode('ascii','ignore'), + work.id, + work.editions.count(), + work.first_isbn_13(), + )) + work_id = work.id if work.editions.count() != 1: - print + self.stdout.write() continue isbn=work.first_isbn_13() if isbn: new_work = bookloader.relate_isbn( isbn ) if new_work is None: - print "failed to get edition" + self.stdout.write("failed to get edition") elif new_work.id != work_id: - print "added edition to work %s with %s editions" % (new_work.id, new_work.editions.count()) + self.stdout.write("added edition to work %s with %s editions" % (new_work.id, new_work.editions.count())) else: if work.editions.count()>1: - print "singleton joined to new edition" + self.stdout.write("singleton joined to new edition") else: - print "singleton edition not moved" + self.stdout.write("singleton edition not moved") else: - print "no ISBN for this work and therefore no new editions" - print "Updated Number of singleton Works with language = %s: %s" % (language,models.Work.objects.annotate(num_editions=Count('editions')).filter(num_editions=1, language=language).count() ) + self.stdout.write("no ISBN for this work and therefore no new 
editions") + self.stdout.write("Updated Number of singleton Works with language = %s: %s" % ( + language, + models.Work.objects.annotate( + num_editions=Count('editions')).filter(num_editions=1,language=language).count() + ) + ) diff --git a/core/management/commands/rectify_OLA_acknames.py b/core/management/commands/rectify_OLA_acknames.py deleted file mode 100644 index b5dc2c5fa..000000000 --- a/core/management/commands/rectify_OLA_acknames.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -one-time command to ensure transaction.ack_name for OLA returns something sensible -see https://github.com/Gluejar/regluit/pull/97#discussion_r2436193 -""" - -from django.core.management.base import BaseCommand -from regluit.core.models import Campaign -from regluit.payment.models import Transaction - -class Command(BaseCommand): - help = "make sure transaction.ack_name returns something sensible for OLA transactions" - - def handle(self, **options): - ola_campaign = Campaign.objects.filter(work__id=81834) - assert ola_campaign.count() == 1 - ola_campaign = ola_campaign[0] - ola_transactions = Transaction.objects.filter(campaign=ola_campaign) - for t in ola_transactions: - if t.anonymous: - t.extra.update({"ack_name": ''}) - else: - ack_name=t.extra.get("ack_name",'') - if not ack_name: - t.extra.update({"ack_name": t.user.username}) - t.extra.update({"ack_dedication": ''}) - t.save() diff --git a/core/management/commands/refresh_free.py b/core/management/commands/refresh_free.py new file mode 100644 index 000000000..d687c70ef --- /dev/null +++ b/core/management/commands/refresh_free.py @@ -0,0 +1,19 @@ +from django.core.management.base import BaseCommand +from django.db.models import Sum + +from regluit.core.models import Work + + + +class Command(BaseCommand): + '''mark works that have active ebooks as free''' + help = "mark works that have active ebooks as free" + + def handle(self, **options): + qs = Work.objects.annotate(num_free=Sum('editions__ebooks__active')).filter(num_free__gt=0) +
for free in qs.filter(is_free=False): + self.stdout.write('freeing %s' % free.title) + free.is_free = True + for subject in free.subjects.all(): + subject.count_free() + free.save() diff --git a/core/management/commands/relookup_isbns.py b/core/management/commands/relookup_isbns.py index bc1db56cd..e6c4a7677 100644 --- a/core/management/commands/relookup_isbns.py +++ b/core/management/commands/relookup_isbns.py @@ -8,17 +8,18 @@ class Command(BaseCommand): help = "relookup all editions attached to language=xx works" - args = "" + def add_arguments(self, parser): + parser.add_argument('title', nargs='?', default='', help="start of title") def handle(self, title='', **options): - print "Number of Works with language=xx, title like %s: %s" % (title, models.Work.objects.filter(language='xx', title__istartswith=title).count()) - updated_num=0 + self.stdout.write("Number of Works with language=xx, title like %s: %s" % (title, models.Work.objects.filter(language='xx', title__istartswith=title).count())) + updated_num = 0 for work in models.Work.objects.filter(language='xx', title__istartswith=title): - print "updating work %s" % work + self.stdout.write("updating work %s" % work) for edition in work.editions.all(): - print "updating edition %s" % edition + self.stdout.write("updating edition %s" % edition) updated = bookloader.update_edition(edition) if updated.work.language!= 'xx': updated_num+=1 - print "Number of updated editions= %s" % updated_num + self.stdout.write("Number of updated editions= %s" % updated_num) diff --git a/core/management/commands/remove_404s.py b/core/management/commands/remove_404s.py new file mode 100644 index 000000000..bf98f944d --- /dev/null +++ b/core/management/commands/remove_404s.py @@ -0,0 +1,43 @@ +import requests +from django.core.management.base import BaseCommand + +from regluit.core.models import Ebook + +class Command(BaseCommand): + help = "check ebooks for 404s and remove if needed" + args = "<limit>" + + def add_arguments(self, 
parser): + parser.add_argument('limit', nargs='?', type=int, default=50, help="max to check") + parser.add_argument('--ebook', nargs='?', type=int, default=0, help="ebook to check") + parser.add_argument('--provider', nargs='?', default='', help="provider to check") + parser.add_argument('--format', nargs='?', default='online', help="format to check") + + def handle(self, limit=0, **options): + limit = int(limit) if limit else 0 + format = options.get('format') + if format == 'all': + onlines = Ebook.objects.all() + else: + onlines = Ebook.objects.filter(format=format) + if options.get('ebook'): + onlines = Ebook.objects.filter(id=options.get('ebook')) + elif options.get('provider'): + onlines = onlines.filter(provider=options.get('provider')) + removed = [] + done = 0 + for online in onlines: + if not online.ebook_files.exists(): + try: + r = requests.get(online.url) + if r.status_code == 404: + removed.append(online.edition.id) + self.stdout.write(online.edition.title) + online.delete() + except UnicodeDecodeError: + self.stdout.write("Encoding error for %s" % online.url) + done +=1 + if done >= limit or done >= 500: + break + self.stdout.write("%s ebooks checked" % done) + self.stdout.write("%s ebooks removed" % len(removed)) diff --git a/core/management/commands/remove_orphan_editions.py b/core/management/commands/remove_orphan_editions.py index fa63ae50b..94d7f626d 100644 --- a/core/management/commands/remove_orphan_editions.py +++ b/core/management/commands/remove_orphan_editions.py @@ -15,4 +15,4 @@ def handle(self, **options): edition.delete() deleted=deleted+1 numeditions=numeditions+1 - print "%s deleted from %s total" % (deleted, numeditions) + self.stdout.write("%s deleted from %s total" % (deleted, numeditions)) diff --git a/core/management/commands/remove_orphan_works.py b/core/management/commands/remove_orphan_works.py index ce1adbf14..1acc804de 100644 --- a/core/management/commands/remove_orphan_works.py +++ 
b/core/management/commands/remove_orphan_works.py @@ -15,4 +15,4 @@ def handle(self, **options): work.delete() deleted=deleted+1 numworks=numworks+1 - print "%s deleted from %s total" % (deleted, numworks) + self.stdout.write("%s deleted from %s total" % (deleted, numworks)) diff --git a/core/management/commands/seed_degruyter.html b/core/management/commands/seed_degruyter.html deleted file mode 100644 index 30b1aaadc..000000000 --- a/core/management/commands/seed_degruyter.html +++ /dev/null @@ -1,40 +0,0 @@ -<div class="launch_top" id="degruyter_countdown" style="font-size:20px;text-align:center;width:50%"></div> - -<h4>Help us unglue this book!</h4> -<p>De Gruyter has agreed to run an ungluing campaign for this book, if it can get enough support from ungluers like you. The target price will be $2100, after which the book will be free for everyone on earth to read, copy, and share, forever (under a Creative Commons <a href="https://creativecommons.org/licenses/by-nc-nd/3.0/">BY-NC-ND</a> license).</p> - -<p>They'll launch a campaign when 50 ungluers have wished for this book. Right now <span id="wisher_data"></span>. </p> - -<p id="cta"></p> - -<hr> - -<script type="text/javascript"> - var $j = jQuery.noConflict(); - $j(document).ready(function(){ - var countdown = 50 - numWishers; - if(countdown == 1) { - $j("#degruyter_countdown").html("Only 1 more ungluer to go!"); - } else { - $j("#degruyter_countdown").html(countdown + " ungluers to go"); - } - - if(numWishers == 1) { - var wisherDataText = "1 ungluer has wished for this book" - } else { - var wisherDataText = numWishers + " ungluers have wished for this book" - } - $j("#wisher_data").html(wisherDataText); - - if(isSupporter){ - if(numWishers == 1) { - var callToAction = "Thanks for getting things started! Will you ask your friends to join you?" - } else { - var callToAction = "Thanks for being one of them! Will you ask your friends to join you?" - } - } else { - var callToAction = "Won't you join in?" 
- } - $j("#cta").html(callToAction); - }); -</script> diff --git a/core/management/commands/seed_degruyter_templates.py b/core/management/commands/seed_degruyter_templates.py deleted file mode 100644 index 3604a923b..000000000 --- a/core/management/commands/seed_degruyter_templates.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -seed empty but initialized deGruyter books with something useful -""" - -from django.core.management.base import BaseCommand -from regluit.core.models import Work - -class Command(BaseCommand): - help = "Seed empty but initialized deGruyter books with something useful. Takes filename containing seed description as argument. Can be safely run more than once; will ignore books with descriptions." - - def handle(self, filename, **options): - books = Work.objects.filter(editions__publisher_name__id=4311, campaigns__status="INITIALIZED") - for book in books: - if not 'degruyter_countdown' in book.description: - """ - read in file and prepend to description - ignores descriptions that already start with the seed file - """ - seed_file = open(filename) - book.description = seed_file.read() + book.description - book.save() - seed_file.close() \ No newline at end of file diff --git a/core/management/commands/set_campaign_editions.py b/core/management/commands/set_campaign_editions.py deleted file mode 100644 index 0a7ff944d..000000000 --- a/core/management/commands/set_campaign_editions.py +++ /dev/null @@ -1,14 +0,0 @@ -from django.core.management.base import BaseCommand -from regluit.core.models import Campaign - -class Command(BaseCommand): - help = "set campaign edition for every campaign" - - def handle(self, **options): - fixed = 0 - for campaign in Campaign.objects.all(): - if not campaign.edition: - campaign.edition = campaign.work.editions.all()[0] - campaign.save() - fixed +=1 - print "{} campaign editions set".format(fixed) diff --git a/core/management/commands/set_key.py b/core/management/commands/set_key.py deleted file mode 100644 index 
6ce71264d..000000000 --- a/core/management/commands/set_key.py +++ /dev/null @@ -1,12 +0,0 @@ -from django.core.management.base import BaseCommand -from regluit.core.models import Key - -class Command(BaseCommand): - help = "set a core.models.Key with name value" - args = "<name> <value>" - - def handle(self, name, value, **options): - (k, created) = Key.objects.get_or_create(name=name) - k.value = value - k.save() - diff --git a/core/management/commands/subjects_from_bic.py b/core/management/commands/subjects_from_bic.py new file mode 100644 index 000000000..ddbdf7caf --- /dev/null +++ b/core/management/commands/subjects_from_bic.py @@ -0,0 +1,18 @@ +import string +from django.core.management.base import BaseCommand +from regluit.core.models import Subject +from regluit.core.validation import explode_bic + +class Command(BaseCommand): + help = "explode compound bic subjects from doab" + + def handle(self, **options): + matches=0 + for subject in Subject.objects.filter(name__startswith='bic Book Indus'): + newsubs = explode_bic(subject.name) + for work in subject.works.all(): + for subsub in newsubs: + Subject.set_by_name(subsub, work) + subject.delete() + + self.stdout.write("bic headings exploded" ) diff --git a/core/management/commands/subjects_to_bisac.py b/core/management/commands/subjects_to_bisac.py index 679b6b1ab..31771e5f9 100644 --- a/core/management/commands/subjects_to_bisac.py +++ b/core/management/commands/subjects_to_bisac.py @@ -14,4 +14,4 @@ def handle(self, **options): subject.name = bisac_heading.full_label subject.save() matches += 1 - print "%s bisac headings converted" % matches + self.stdout.write("%s bisac headings converted" % matches) diff --git a/core/management/commands/translate_doab_ids.py b/core/management/commands/translate_doab_ids.py new file mode 100644 index 000000000..4de5ac94b --- /dev/null +++ b/core/management/commands/translate_doab_ids.py @@ -0,0 +1,74 @@ +import csv +import json +import boto3 +from botocore.exceptions 
import ClientError + +from django.conf import settings +from django.core.management.base import BaseCommand + +from regluit.core.models import Edition, Identifier + +s3 = boto3.resource('s3', + aws_access_key_id=settings.AWS_ACCESS_KEY_ID, + aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY) + + + +class Command(BaseCommand): + help = "translate doab ids to handles" + def add_arguments(self, parser): + parser.add_argument('filename', nargs='?', help="filename") + parser.add_argument('--old_id', nargs='?', default=None, help="id to translate") + + def handle(self, filename, **options): + self.stdout.write("doab ids to start: %s" % Identifier.objects.filter(type='doab').count()) + with open(filename, 'r') as jsonfile: + newdoab = json.loads(jsonfile.read()) + done = 0 + if options['old_id']: + to_do = Identifier.objects.filter(type='doab', value=options['old_id']) + else: + to_do = Identifier.objects.filter(type='doab') + for doab in to_do: + if doab.value.startswith("20.500.12854"): + continue + if doab.value in newdoab: + # already done + if Identifier.objects.filter(type='doab', value=newdoab[doab.value]).exists(): + doab.delete() + else: + old_cover_file_name = 'doab/%s/cover' % doab.value + new_cover_file_name = 'doab/%s' % newdoab[doab.value] + self.move_cover(old_cover_file_name, new_cover_file_name) + doab.value = newdoab[doab.value] + doab.save() + else: + doab.delete() + done += 1 + self.stdout.write("doab ids at end: %s" % Identifier.objects.filter(type='doab').count()) + self.stdout.write("done:: %s" % done) + + def move_cover(self, old_name, new_name): + if old_name == new_name: + return + old_url = "https://{}.s3.amazonaws.com/{}".format( + settings.AWS_STORAGE_BUCKET_NAME, old_name) + new_url = "https://{}.s3.amazonaws.com/{}".format( + settings.AWS_STORAGE_BUCKET_NAME, new_name) + copy_source = { + 'Bucket': settings.AWS_STORAGE_BUCKET_NAME, + 'Key': old_name + } + try: + s3.meta.client.copy_object( + CopySource=copy_source, + 
Bucket=settings.AWS_STORAGE_BUCKET_NAME, + Key=new_name, ACL='public-read') + + for ed in Edition.objects.filter(cover_image__contains=old_name): + ed.cover_image = new_url + ed.save() + + s3.meta.client.delete_object(Bucket=settings.AWS_STORAGE_BUCKET_NAME, Key=old_name,) + except ClientError: + self.stdout.write("problem moving %s to %s" % (old_name, new_name)) diff --git a/core/management/commands/update_downloads.py b/core/management/commands/update_downloads.py new file mode 100644 index 000000000..6a9d5479e --- /dev/null +++ b/core/management/commands/update_downloads.py @@ -0,0 +1,51 @@ +import os +from datetime import datetime + +from django.conf import settings +from django.core.management.base import BaseCommand +from django.db.models import F, Sum + +from regluit.core.models import Ebook + +DOWNLOAD_LOGFILE = settings.LOGGING['handlers']['downloads']['filename'] + +class Command(BaseCommand): + '''add logged downloads to ebook objects''' + help = "add logged downloads to ebook objects" + + def handle(self, **options): + dls = {} + date_format = "%Y-%m-%d" + + this_mo = datetime.today().month + last_month = this_mo - 1 + year = datetime.today().year + if last_month <= 0: + last_month = last_month + 12 + total = 0 + for suffix in ['', '.1','.2','.3','.4','.5',]: + fn = DOWNLOAD_LOGFILE + suffix + if os.path.exists(fn): + with open(fn,'r') as logfile: + for line in logfile.readlines(): + (date, time, colon, ebook) = line.split() + month = datetime.strptime(date, date_format).date().month + if month == last_month: + dls[ebook] = dls.get(ebook, 0) + 1 + total += 1 + + downloads = Ebook.objects.aggregate(total=Sum('download_count'))['total'] + self.stdout.write(f'old count: {downloads} downloads') + self.stdout.write(f'logging {total} downloads for {len(dls)} ebooks') + + for key in dls.keys(): + if dls[key] > settings.DOWNLOAD_LOGS_MAX: + self.stdout.write(f'{dls[key]} downloads for ebook {key} discarded.' 
) + continue + try: + Ebook.objects.filter(id=key).update(download_count=F('download_count') + dls[key]) + except Ebook.DoesNotExist: + self.stdout.write(f'ebook {key} not found') + + downloads = Ebook.objects.aggregate(total=Sum('download_count'))['total'] + self.stdout.write(f'new count: {downloads} downloads') diff --git a/core/management/commands/update_providers.py b/core/management/commands/update_providers.py new file mode 100644 index 000000000..8a24d749a --- /dev/null +++ b/core/management/commands/update_providers.py @@ -0,0 +1,39 @@ +from django.core.management.base import BaseCommand +from django.db.models import Q + +from regluit.core.loaders.harvest import dl_online, RateLimiter +from regluit.core.models import Ebook + +class Command(BaseCommand): + help = "recalculate provider from url" + args = "<limit>" + + def add_arguments(self, parser): + parser.add_argument('limit', nargs='?', type=int, default=0, help="max to harvest") + + def handle(self, limit=0, **options): + done = 0 + limit = int(limit) if limit else 0 + unstripped = Ebook.objects.filter(Q(provider='') | Q(provider__startswith='www.')) + for ebook in unstripped: + ebook.url = ebook.url.strip() + new_provider = Ebook.infer_provider(ebook.url) + if new_provider != ebook.provider: + ebook.provider = new_provider + ebook.save() + done += 1 + self.stdout.write('{} urls or netloc stripped'.format(done)) + done = 0 + stale = Ebook.objects.filter(Q(provider__icontains='doi') | Q(provider='Handle Proxy')) + self.stdout.write('{} providers to update'.format(stale.count())) + for ebook in stale: + new_provider = Ebook.infer_provider(ebook.url) + if new_provider != ebook.provider: + ebook.provider = new_provider + ebook.save() + done += 1 + if done > limit or done >= 100: + break + self.stdout.write('{} ebooks updated'.format(done)) + if done == 100: + self.stdout.write('100 is the maximum; repeat to do more') diff --git a/core/management/commands/zap_frankenworks.py 
b/core/management/commands/zap_frankenworks.py deleted file mode 100644 index 850c2aef9..000000000 --- a/core/management/commands/zap_frankenworks.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Dispose of the Frankenworks and recluster the works. Print out email addresses of those whose wishlists have been -affected. -""" - -from django.core.management.base import BaseCommand -from regluit.test import booktests - -class Command(BaseCommand): - help = "Dispose of the Frankenworks and recluster the works. Print out email addresses of those whose wishlists have been affected." - args = "<do>" - - def handle(self, do, **options): - - try: - do = str(do) - if do.lower() == 'true': - do = True - else: - do = False - except: - do = False - - print "before..." - s = booktests.cluster_status() - print s['results'] - - booktests.clean_frankenworks(s, do=do) - s = booktests.cluster_status() - print "after cleanup...." - print "results ", s['results'] - print "scattered clusters ", s['scattered_clusters'] - print "franken works", s['franken_works'] diff --git a/core/migrations/0001_initial.py b/core/migrations/0001_initial.py index 3c13100e5..7e753d38c 100644 --- a/core/migrations/0001_initial.py +++ b/core/migrations/0001_initial.py @@ -152,7 +152,7 @@ class Migration(migrations.Migration): ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('type', models.CharField(max_length=4)), ('value', models.CharField(max_length=250)), - ('edition', models.ForeignKey(related_name='identifiers', to='core.Edition', null=True)), + ('edition', models.ForeignKey(on_delete=models.CASCADE, related_name='identifiers', to='core.Edition', null=True)), ], ), migrations.CreateModel( @@ -168,7 +168,7 @@ class Migration(migrations.Migration): fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('marc_link_target', models.CharField(default=b'UNGLUE', max_length=6, verbose_name=b'MARC record link targets', 
choices=[(b'DIRECT', b'Raw link'), (b'UNGLUE', b'Unglue.it link')])), - ('user', models.OneToOneField(related_name='libpref', to=settings.AUTH_USER_MODEL)), + ('user', models.OneToOneField(on_delete=models.CASCADE, related_name='libpref', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -189,7 +189,7 @@ class Migration(migrations.Migration): ('amount', models.DecimalField(max_digits=10, decimal_places=0)), ('description', models.TextField(null=True)), ('limit', models.IntegerField(default=0)), - ('campaign', models.ForeignKey(related_name='premiums', to='core.Campaign', null=True)), + ('campaign', models.ForeignKey(on_delete=models.CASCADE, related_name='premiums', to='core.Campaign', null=True)), ], ), migrations.CreateModel( @@ -220,7 +220,7 @@ class Migration(migrations.Migration): fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=255)), - ('publisher', models.ForeignKey(related_name='alternate_names', to='core.Publisher', null=True)), + ('publisher', models.ForeignKey(on_delete=models.CASCADE, related_name='alternate_names', to='core.Publisher', null=True)), ], ), migrations.CreateModel( @@ -235,9 +235,9 @@ class Migration(migrations.Migration): name='Relator', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('author', models.ForeignKey(to='core.Author')), - ('edition', models.ForeignKey(related_name='relators', to='core.Edition')), - ('relation', models.ForeignKey(default=1, to='core.Relation')), + ('author', models.ForeignKey(on_delete=models.CASCADE, to='core.Author')), + ('edition', models.ForeignKey(on_delete=models.CASCADE, related_name='relators', to='core.Edition')), + ('relation', models.ForeignKey(on_delete=models.CASCADE, default=1, to='core.Relation')), ], options={ 'db_table': 'core_author_editions', @@ -251,7 +251,7 @@ class Migration(migrations.Migration): 
('email', models.CharField(max_length=100, blank=True)), ('rights_holder_name', models.CharField(max_length=100)), ('can_sell', models.BooleanField(default=False)), - ('owner', models.ForeignKey(related_name='rights_holder', to=settings.AUTH_USER_MODEL)), + ('owner', models.ForeignKey(on_delete=models.CASCADE, related_name='rights_holder', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -286,7 +286,7 @@ class Migration(migrations.Migration): ('goodreads_user_link', models.CharField(max_length=200, null=True, blank=True)), ('avatar_source', models.PositiveSmallIntegerField(default=4, null=True, choices=[(0, b'No Avatar, Please'), (1, b'Gravatar'), (2, b'Twitter'), (3, b'Facebook'), (4, b'Unglueitar')])), ('badges', models.ManyToManyField(related_name='holders', to='core.Badge')), - ('user', models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL)), + ('user', models.OneToOneField(on_delete=models.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -295,7 +295,7 @@ class Migration(migrations.Migration): ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('was', models.IntegerField(unique=True)), ('moved', models.DateTimeField(auto_now_add=True)), - ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)), + ('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.CreateModel( @@ -314,7 +314,7 @@ class Migration(migrations.Migration): fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', models.DateTimeField(auto_now_add=True)), - ('user', models.OneToOneField(related_name='wishlist', to=settings.AUTH_USER_MODEL)), + ('user', models.OneToOneField(on_delete=models.CASCADE, related_name='wishlist', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -330,7 +330,7 @@ class Migration(migrations.Migration): 
('publication_range', models.CharField(max_length=50, null=True)), ('featured', models.DateTimeField(db_index=True, null=True, blank=True)), ('is_free', models.BooleanField(default=False)), - ('selected_edition', models.ForeignKey(related_name='selected_works', to='core.Edition', null=True)), + ('selected_edition', models.ForeignKey(on_delete=models.CASCADE, related_name='selected_works', to='core.Edition', null=True)), ], options={ 'ordering': ['title'], @@ -344,17 +344,17 @@ class Migration(migrations.Migration): migrations.AddField( model_name='wishes', name='wishlist', - field=models.ForeignKey(to='core.Wishlist'), + field=models.ForeignKey(on_delete=models.CASCADE, to='core.Wishlist'), ), migrations.AddField( model_name='wishes', name='work', - field=models.ForeignKey(related_name='wishes', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='wishes', to='core.Work'), ), migrations.AddField( model_name='waswork', name='work', - field=models.ForeignKey(to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, to='core.Work'), ), migrations.AddField( model_name='subject', @@ -364,16 +364,16 @@ class Migration(migrations.Migration): migrations.AddField( model_name='publisher', name='name', - field=models.ForeignKey(related_name='key_publisher', to='core.PublisherName'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='key_publisher', to='core.PublisherName'), ), migrations.AddField( model_name='offer', name='work', - field=models.ForeignKey(related_name='offers', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='offers', to='core.Work'), ), migrations.AddField( model_name='identifier', name='work', - field=models.ForeignKey(related_name='identifiers', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='identifiers', to='core.Work'), ), ] diff --git a/core/migrations/0002_auto_20160722_1716.py b/core/migrations/0002_auto_20160722_1716.py index 
6c8eaedfb..f6ff8dc46 100644 --- a/core/migrations/0002_auto_20160722_1716.py +++ b/core/migrations/0002_auto_20160722_1716.py @@ -18,82 +18,82 @@ class Migration(migrations.Migration): migrations.AddField( model_name='hold', name='library', - field=models.ForeignKey(related_name='holds', to='libraryauth.Library'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='holds', to='libraryauth.Library'), ), migrations.AddField( model_name='hold', name='user', - field=models.ForeignKey(related_name='holds', to=settings.AUTH_USER_MODEL), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='holds', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='hold', name='work', - field=models.ForeignKey(related_name='holds', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='holds', to='core.Work'), ), migrations.AddField( model_name='gift', name='acq', - field=models.ForeignKey(related_name='gifts', to='core.Acq'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='gifts', to='core.Acq'), ), migrations.AddField( model_name='gift', name='giver', - field=models.ForeignKey(related_name='gifts', to=settings.AUTH_USER_MODEL), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='gifts', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='edition', name='publisher_name', - field=models.ForeignKey(related_name='editions', to='core.PublisherName', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='editions', to='core.PublisherName', null=True), ), migrations.AddField( model_name='edition', name='work', - field=models.ForeignKey(related_name='editions', to='core.Work', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='editions', to='core.Work', null=True), ), migrations.AddField( model_name='ebookfile', name='edition', - field=models.ForeignKey(related_name='ebook_files', to='core.Edition'), + 
field=models.ForeignKey(on_delete=models.CASCADE, related_name='ebook_files', to='core.Edition'), ), migrations.AddField( model_name='ebook', name='edition', - field=models.ForeignKey(related_name='ebooks', to='core.Edition'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='ebooks', to='core.Edition'), ), migrations.AddField( model_name='ebook', name='user', - field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True), + field=models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='claim', name='rights_holder', - field=models.ForeignKey(related_name='claim', to='core.RightsHolder'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='claim', to='core.RightsHolder'), ), migrations.AddField( model_name='claim', name='user', - field=models.ForeignKey(related_name='claim', to=settings.AUTH_USER_MODEL), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='claim', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='claim', name='work', - field=models.ForeignKey(related_name='claim', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='claim', to='core.Work'), ), migrations.AddField( model_name='celerytask', name='user', - field=models.ForeignKey(related_name='tasks', to=settings.AUTH_USER_MODEL, null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='tasks', to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='campaignaction', name='campaign', - field=models.ForeignKey(related_name='actions', to='core.Campaign'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='actions', to='core.Campaign'), ), migrations.AddField( model_name='campaign', name='edition', - field=models.ForeignKey(related_name='campaigns', to='core.Edition', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='campaigns', to='core.Edition', null=True), ), 
migrations.AddField( model_name='campaign', @@ -103,12 +103,12 @@ class Migration(migrations.Migration): migrations.AddField( model_name='campaign', name='publisher', - field=models.ForeignKey(related_name='campaigns', to='core.Publisher', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='campaigns', to='core.Publisher', null=True), ), migrations.AddField( model_name='campaign', name='work', - field=models.ForeignKey(related_name='campaigns', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='campaigns', to='core.Work'), ), migrations.AddField( model_name='author', @@ -118,22 +118,22 @@ class Migration(migrations.Migration): migrations.AddField( model_name='acq', name='lib_acq', - field=models.ForeignKey(related_name='loans', to='core.Acq', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='loans', to='core.Acq', null=True), ), migrations.AddField( model_name='acq', name='user', - field=models.ForeignKey(related_name='acqs', to=settings.AUTH_USER_MODEL), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='acqs', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='acq', name='watermarked', - field=models.ForeignKey(to='booxtream.Boox', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, to='booxtream.Boox', null=True), ), migrations.AddField( model_name='acq', name='work', - field=models.ForeignKey(related_name='acqs', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='acqs', to='core.Work'), ), migrations.AlterUniqueTogether( name='identifier', diff --git a/core/migrations/0003_auto_20160816_1645.py b/core/migrations/0003_auto_20160816_1645.py index 1ae7aa686..b123b5fc5 100644 --- a/core/migrations/0003_auto_20160816_1645.py +++ b/core/migrations/0003_auto_20160816_1645.py @@ -38,17 +38,17 @@ class Migration(migrations.Migration): migrations.AddField( model_name='workrelation', name='from_work', - 
field=models.ForeignKey(related_name='works_related_from', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='works_related_from', to='core.Work'), ), migrations.AddField( model_name='workrelation', name='to_work', - field=models.ForeignKey(related_name='works_related_to', to='core.Work'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='works_related_to', to='core.Work'), ), migrations.AddField( model_name='edition', name='note', - field=models.ForeignKey(to='core.EditionNote', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, to='core.EditionNote', null=True), ), migrations.AddField( model_name='work', diff --git a/core/migrations/0005_ebookfile_ebook.py b/core/migrations/0005_ebookfile_ebook.py index c738940b8..923a5b104 100644 --- a/core/migrations/0005_ebookfile_ebook.py +++ b/core/migrations/0005_ebookfile_ebook.py @@ -14,6 +14,6 @@ class Migration(migrations.Migration): migrations.AddField( model_name='ebookfile', name='ebook', - field=models.ForeignKey(related_name='ebook_files', to='core.Ebook', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='ebook_files', to='core.Ebook', null=True), ), ] diff --git a/core/migrations/0006_auto_20160818_1809.py b/core/migrations/0006_auto_20160818_1809.py index 5b3cbe4c1..ad093254d 100644 --- a/core/migrations/0006_auto_20160818_1809.py +++ b/core/migrations/0006_auto_20160818_1809.py @@ -39,7 +39,7 @@ def add_ebooks_to_ebfs(apps, schema_editor): elif ebf.edition.work.campaigns.filter(type=2): pass else: - print 'ebf {} is dangling'.format(ebf.id) + print('ebf {} is dangling'.format(ebf.id)) def noop(apps, schema_editor): pass diff --git a/core/migrations/0007_auto_20160923_1314.py b/core/migrations/0007_auto_20160923_1314.py index 3f1ee8b25..8feb68fec 100644 --- a/core/migrations/0007_auto_20160923_1314.py +++ b/core/migrations/0007_auto_20160923_1314.py @@ -28,12 +28,12 @@ class Migration(migrations.Migration): 
migrations.AlterField( model_name='edition', name='note', - field=models.ForeignKey(blank=True, to='core.EditionNote', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, blank=True, to='core.EditionNote', null=True), ), migrations.AlterField( model_name='edition', name='publisher_name', - field=models.ForeignKey(related_name='editions', blank=True, to='core.PublisherName', null=True), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='editions', blank=True, to='core.PublisherName', null=True), ), migrations.AlterField( model_name='userprofile', diff --git a/core/migrations/0015_auto_20180720_1413.py b/core/migrations/0015_auto_20180720_1413.py new file mode 100644 index 000000000..e9bf39bfc --- /dev/null +++ b/core/migrations/0015_auto_20180720_1413.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0014_auto_20180618_1646'), + ] + + operations = [ + migrations.AlterField( + model_name='userprofile', + name='avatar_source', + field=models.PositiveSmallIntegerField(default=4, null=True, choices=[(0, b'No Avatar, Please'), (1, b'Gravatar'), (2, b'Twitter/Facebook'), (4, b'Unglueitar')]), + ), + migrations.AlterField( + model_name='userprofile', + name='facebook_id', + field=models.CharField(default='', max_length=31, blank=True), + preserve_default=False, + ), + ] diff --git a/core/migrations/0016_auto_20181108_1646.py b/core/migrations/0016_auto_20181108_1646.py new file mode 100644 index 000000000..27fd29c8e --- /dev/null +++ b/core/migrations/0016_auto_20181108_1646.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.14 on 2018-11-08 16:46 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0015_auto_20180720_1413'), + ] + + operations = [ + migrations.RunSQL( + 
['CREATE FULLTEXT INDEX core_work_index ON core_work (title);'], + ['DROP INDEX core_work_index on core_work;'], + ), + migrations.RunSQL( + ['CREATE FULLTEXT INDEX core_author_index ON core_author (name);'], + ['DROP INDEX core_author_index on core_author;'], + ), + ] diff --git a/core/migrations/0017_auto_20190227_1457.py b/core/migrations/0017_auto_20190227_1457.py new file mode 100644 index 000000000..c372feae8 --- /dev/null +++ b/core/migrations/0017_auto_20190227_1457.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.14 on 2019-02-27 14:57 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0016_auto_20181108_1646'), + ] + + operations = [ + migrations.AddIndex( + model_name='edition', + index=models.Index(fields=['work'], name='core_editio_work_id_3ae536_idx'), + ), + migrations.AddIndex( + model_name='author', + index=models.Index(fields=['name'], name='core_author_name_fca240_idx'), + ), + migrations.AddIndex( + model_name='subject', + index=models.Index(fields=['name'], name='core_subjec_name_36111e_idx'), + ), + migrations.AddIndex( + model_name='work', + index=models.Index(fields=['is_free', 'title'], name='core_work_is_free_1e4d06_idx'), + ), + ] diff --git a/core/migrations/0018_auto_20200214_1347.py b/core/migrations/0018_auto_20200214_1347.py new file mode 100644 index 000000000..8583ef713 --- /dev/null +++ b/core/migrations/0018_auto_20200214_1347.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.28 on 2020-02-14 13:47 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0017_auto_20190227_1457'), + ] + + operations = [ + migrations.AlterField( + model_name='acq', + name='license', + field=models.PositiveSmallIntegerField(choices=[(1, 'Individual license'), (2, 'Library License'), (3, 
'Borrowed from Library'), (0, 'Just for Testing'), (4, 'On Reserve'), (5, 'Already Thanked')], default=1), + ), + migrations.AlterField( + model_name='badge', + name='description', + field=models.TextField(default='', null=True), + ), + migrations.AlterField( + model_name='campaign', + name='license', + field=models.CharField(choices=[('CC BY-NC-ND', 'Creative Commons Attribution-NonCommercial-NoDerivs 3.0 Unported (CC BY-NC-ND 3.0)'), ('CC BY-NC-SA', 'Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported (CC BY-NC-SA 3.0)'), ('CC BY-NC', 'Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0)'), ('CC BY-ND', 'Creative Commons Attribution-NoDerivs 3.0 Unported (CC BY-ND 3.0)'), ('CC BY-SA', 'Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0)'), ('CC BY', 'Creative Commons Attribution 3.0 Unported (CC BY 3.0)'), ('CC0', 'No Rights Reserved (CC0)'), ('GFDL', 'GNU Free Documentation License'), ('LAL', 'Licence Art Libre'), ('OSI', 'OSI Approved License')], default='CC BY-NC-ND', max_length=255), + ), + migrations.AlterField( + model_name='campaign', + name='status', + field=models.CharField(choices=[('INITIALIZED', 'INITIALIZED'), ('ACTIVE', 'ACTIVE'), ('SUSPENDED', 'SUSPENDED'), ('WITHDRAWN', 'WITHDRAWN'), ('SUCCESSFUL', 'SUCCESSFUL'), ('UNSUCCESSFUL', 'UNSUCCESSFUL')], db_index=True, default='INITIALIZED', max_length=15, null=True), + ), + migrations.AlterField( + model_name='campaign', + name='type', + field=models.PositiveSmallIntegerField(choices=[(1, 'Pledge-to-unglue campaign'), (2, 'Buy-to-unglue campaign'), (3, 'Thanks-for-ungluing campaign')], default=1), + ), + migrations.AlterField( + model_name='claim', + name='status', + field=models.CharField(choices=[('active', 'Claim has been accepted.'), ('pending', 'Claim is pending acceptance.'), ('release', 'Claim has not been accepted.')], default='active', max_length=7), + ), + migrations.AlterField( + model_name='ebook', + name='format', + 
field=models.CharField(choices=[('pdf', 'PDF'), ('epub', 'EPUB'), ('html', 'HTML'), ('text', 'TEXT'), ('mobi', 'MOBI')], max_length=25), + ), + migrations.AlterField( + model_name='ebook', + name='rights', + field=models.CharField(choices=[('CC BY-NC-ND', 'Creative Commons Attribution-NonCommercial-NoDerivs'), ('CC BY-NC-SA', 'Creative Commons Attribution-NonCommercial-ShareAlike'), ('CC BY-NC', 'Creative Commons Attribution-NonCommercial'), ('CC BY-ND', 'Creative Commons Attribution-NoDerivs'), ('CC BY-SA', 'Creative Commons Attribution-ShareAlike'), ('CC BY', 'Creative Commons Attribution'), ('CC0', 'No Rights Reserved (CC0)'), ('GFDL', 'GNU Free Documentation License'), ('LAL', 'Licence Art Libre'), ('OSI', 'OSI Approved License'), ('PD-US', 'Public Domain, US')], db_index=True, max_length=255, null=True), + ), + migrations.AlterField( + model_name='ebook', + name='version_label', + field=models.CharField(blank=True, default='', max_length=255), + ), + migrations.AlterField( + model_name='ebookfile', + name='format', + field=models.CharField(choices=[('pdf', 'PDF'), ('epub', 'EPUB'), ('html', 'HTML'), ('text', 'TEXT'), ('mobi', 'MOBI')], max_length=25), + ), + migrations.AlterField( + model_name='gift', + name='message', + field=models.TextField(default='', max_length=512), + ), + migrations.AlterField( + model_name='libpref', + name='marc_link_target', + field=models.CharField(choices=[('DIRECT', 'Raw link'), ('UNGLUE', 'Unglue.it link')], default='UNGLUE', max_length=6, verbose_name='MARC record link targets'), + ), + migrations.AlterField( + model_name='offer', + name='license', + field=models.PositiveSmallIntegerField(choices=[(1, 'Individual license'), (2, 'Library License')], default=1), + ), + migrations.AlterField( + model_name='publisher', + name='description', + field=models.TextField(blank=True, default='', null=True), + ), + migrations.AlterField( + model_name='rightsholder', + name='address', + field=models.CharField(default='', max_length=400), + 
), + migrations.AlterField( + model_name='rightsholder', + name='email', + field=models.CharField(default='', max_length=100), + ), + migrations.AlterField( + model_name='rightsholder', + name='mailing', + field=models.CharField(default='', max_length=400), + ), + migrations.AlterField( + model_name='rightsholder', + name='signature', + field=models.CharField(default='', max_length=100), + ), + migrations.AlterField( + model_name='rightsholder', + name='signer', + field=models.CharField(default='', max_length=100), + ), + migrations.AlterField( + model_name='rightsholder', + name='signer_title', + field=models.CharField(default='', max_length=30), + ), + migrations.AlterField( + model_name='subject', + name='authority', + field=models.CharField(default='', max_length=10), + ), + migrations.AlterField( + model_name='userprofile', + name='avatar_source', + field=models.PositiveSmallIntegerField(choices=[(0, 'No Avatar, Please'), (1, 'Gravatar'), (2, 'Twitter/Facebook'), (4, 'Unglueitar')], default=4, null=True), + ), + migrations.AlterField( + model_name='work', + name='age_level', + field=models.CharField(blank=True, choices=[('', 'No Rating'), ('5-6', "Children's - Kindergarten, Age 5-6"), ('6-7', "Children's - Grade 1-2, Age 6-7"), ('7-8', "Children's - Grade 2-3, Age 7-8"), ('8-9', "Children's - Grade 3-4, Age 8-9"), ('9-11', "Children's - Grade 4-6, Age 9-11"), ('12-14', 'Teen - Grade 7-9, Age 12-14'), ('15-18', 'Teen - Grade 10-12, Age 15-18'), ('18-', 'Adult/Advanced Reader')], default='', max_length=5), + ), + migrations.AlterField( + model_name='work', + name='description', + field=models.TextField(blank=True, default='', null=True), + ), + migrations.AlterField( + model_name='work', + name='language', + field=models.CharField(db_index=True, default='en', max_length=5), + ), + migrations.AlterField( + model_name='workrelation', + name='relation', + field=models.CharField(choices=[('translation', 'translation'), ('revision', 'revision'), ('sequel', 'sequel'), 
('part', 'part')], max_length=15), + ), + ] diff --git a/core/migrations/0019_delete_key.py b/core/migrations/0019_delete_key.py new file mode 100644 index 000000000..1daa9f94c --- /dev/null +++ b/core/migrations/0019_delete_key.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.28 on 2020-02-17 15:08 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0018_auto_20200214_1347'), + ] + + operations = [ + migrations.DeleteModel( + name='Key', + ), + ] diff --git a/core/migrations/0020_auto_20200720_1319.py b/core/migrations/0020_auto_20200720_1319.py new file mode 100644 index 000000000..e9e82aaf1 --- /dev/null +++ b/core/migrations/0020_auto_20200720_1319.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2020-07-20 13:19 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0019_delete_key'), + ] + + operations = [ + migrations.RemoveField( + model_name='userprofile', + name='facebook_id', + ), + migrations.AlterField( + model_name='userprofile', + name='avatar_source', + field=models.PositiveSmallIntegerField(choices=[(0, 'No Avatar, Please'), (1, 'Gravatar'), (2, 'Twitter'), (4, 'Unglueitar')], default=4, null=True), + ), + ] diff --git a/core/migrations/0021_auto_20200806_1711.py b/core/migrations/0021_auto_20200806_1711.py new file mode 100644 index 000000000..4a2a086b1 --- /dev/null +++ b/core/migrations/0021_auto_20200806_1711.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2020-08-06 17:11 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0020_auto_20200720_1319'), + ] + + operations = [ + migrations.AlterField( + model_name='workrelation', + name='relation', + 
field=models.CharField(choices=[('translation', 'translation'), ('revision', 'revision'), ('sequel', 'sequel'), ('part', 'part'), ('unspecified', 'unspecified')], max_length=15), + ), + ] diff --git a/core/migrations/0022_auto_20200812_1247.py b/core/migrations/0022_auto_20200812_1247.py new file mode 100644 index 000000000..3a073fb25 --- /dev/null +++ b/core/migrations/0022_auto_20200812_1247.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2020-08-12 12:47 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0021_auto_20200806_1711'), + ] + + operations = [ + migrations.AlterField( + model_name='ebook', + name='format', + field=models.CharField(choices=[('pdf', 'PDF'), ('epub', 'EPUB'), ('html', 'HTML'), ('text', 'TEXT'), ('mobi', 'MOBI'), ('online', 'Online Only')], max_length=25), + ), + ] diff --git a/core/migrations/0023_auto_20201210_1508.py b/core/migrations/0023_auto_20201210_1508.py new file mode 100644 index 000000000..961943486 --- /dev/null +++ b/core/migrations/0023_auto_20201210_1508.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2020-12-10 15:08 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0022_auto_20200812_1247'), + ] + + operations = [ + migrations.RemoveField( + model_name='userprofile', + name='goodreads_auth_secret', + ), + migrations.RemoveField( + model_name='userprofile', + name='goodreads_auth_token', + ), + migrations.RemoveField( + model_name='userprofile', + name='goodreads_user_id', + ), + migrations.RemoveField( + model_name='userprofile', + name='goodreads_user_link', + ), + migrations.RemoveField( + model_name='userprofile', + name='goodreads_user_name', + ), + ] diff --git a/core/migrations/0024_auto_20210503_1717.py 
b/core/migrations/0024_auto_20210503_1717.py new file mode 100644 index 000000000..5c60aebfc --- /dev/null +++ b/core/migrations/0024_auto_20210503_1717.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2021-05-03 17:17 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0023_auto_20201210_1508'), + ] + + operations = [ + migrations.AlterField( + model_name='ebookfile', + name='source', + field=models.URLField(blank=True, max_length=1024, null=True), + ), + ] diff --git a/core/migrations/0025_remove_ebookfile_mobied.py b/core/migrations/0025_remove_ebookfile_mobied.py new file mode 100644 index 000000000..ea4384a78 --- /dev/null +++ b/core/migrations/0025_remove_ebookfile_mobied.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2022-07-28 06:16 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0024_auto_20210503_1717'), + ] + + operations = [ + migrations.RemoveField( + model_name='ebookfile', + name='mobied', + ), + ] diff --git a/core/migrations/0026_auto_20230105_2031.py b/core/migrations/0026_auto_20230105_2031.py new file mode 100644 index 000000000..5a98cb6d5 --- /dev/null +++ b/core/migrations/0026_auto_20230105_2031.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2023-01-05 20:31 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0025_remove_ebookfile_mobied'), + ] + + operations = [ + migrations.RemoveField( + model_name='userprofile', + name='twitter_id', + ), + migrations.AlterField( + model_name='userprofile', + name='avatar_source', + field=models.PositiveSmallIntegerField(choices=[(0, 'No Avatar, Please'), (1, 'Gravatar'), (4, 'Unglueitar')], 
default=4, null=True), + ), + ] diff --git a/core/migrations/0027_subject_num_free.py b/core/migrations/0027_subject_num_free.py new file mode 100644 index 000000000..4bf2acd10 --- /dev/null +++ b/core/migrations/0027_subject_num_free.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2024-08-19 14:39 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0026_auto_20230105_2031'), + ] + + operations = [ + migrations.AddField( + model_name='subject', + name='num_free', + field=models.IntegerField(default=0), + ), + ] diff --git a/core/migrations/0028_auto_20240819_1450.py b/core/migrations/0028_auto_20240819_1450.py new file mode 100644 index 000000000..6250c73da --- /dev/null +++ b/core/migrations/0028_auto_20240819_1450.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2024-08-19 14:50 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + def count_free(apps, schema_editor): + """ + Now that subject has num_free filed, populate it + """ + Subject = apps.get_model('core', 'Subject') + for subject in Subject.objects.all(): + subject.num_free = subject.works.filter(is_free=True).count() + subject.save() + + def noop(apps, schema_editor): + pass + + dependencies = [ + ('core', '0027_subject_num_free'), + ] + + operations = [ + migrations.RunPython(count_free, reverse_code=noop, hints={'core': 'Subject'}), + ] diff --git a/core/migrations/0029_auto_20241122_1525.py b/core/migrations/0029_auto_20241122_1525.py new file mode 100644 index 000000000..9ab5b13ea --- /dev/null +++ b/core/migrations/0029_auto_20241122_1525.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2024-11-22 15:25 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + 
dependencies = [ + ('core', '0028_auto_20240819_1450'), + ] + + operations = [ + migrations.AlterField( + model_name='subject', + name='name', + field=models.CharField(db_index=True, max_length=200, unique=True), + ), + migrations.AlterField( + model_name='work', + name='is_free', + field=models.BooleanField(db_index=True, default=False), + ), + ] diff --git a/core/mobi.py b/core/mobi.py deleted file mode 100644 index b26d43e6b..000000000 --- a/core/mobi.py +++ /dev/null @@ -1,32 +0,0 @@ -import requests -from django.conf import settings - -mobigen_url = settings.MOBIGEN_URL -mobigen_user_id = settings.MOBIGEN_USER_ID -mobigen_password = settings.MOBIGEN_PASSWORD - - - -def convert_to_mobi(input_url, input_format="application/epub+zip"): - - """ - return a string with the output of mobigen computation - - """ - if mobigen_url and mobigen_user_id and mobigen_password: - print 'settings ok' - # using verify=False since at the moment, using a self-signed SSL cert. - - payload = requests.get(input_url).content - - headers = {'Content-Type': input_format} - r = requests.post(mobigen_url, auth=(mobigen_user_id, mobigen_password), - data=payload, headers=headers) - - # if HTTP reponse code is ok, the output is the mobi file; else error message - if r.status_code == 200: - return r.content - else: - print "{0}: {1}".format(r.status_code, r.content) - raise Exception("{0}: {1}".format(r.status_code, r.content)) - diff --git a/core/mobigen.py b/core/mobigen.py deleted file mode 100644 index e3a23fe42..000000000 --- a/core/mobigen.py +++ /dev/null @@ -1,92 +0,0 @@ -""" -Utilities for calling mobigen for management. 
do not use in application - -""" - -from itertools import islice -from StringIO import StringIO -import uuid - -from django.core.files.storage import default_storage -from django.core.files.base import ContentFile, File - -from regluit.core.models import (Campaign, Ebook) -from regluit.core import parameters -from regluit.core.mobi import convert_to_mobi - - -# compute whether we can apply mobigen to a given edition to produce a mobi file -# need to have an ebook in epub or pdf format -# possible return values: already has a mobi file / can generate a mobi file / not possible - -def edition_mobi_status(edition): - """ - for a given edition, return: - * 1 if there is already a mobi ebook - * 0 if there is none but we have an epub or html to convert from - * -1 for no epub/html to convert from - """ - formats = set([ebook.format for ebook in edition.work.ebooks()]) - if 'mobi' in formats: - return 1 - elif ('epub' in formats) or ('html' in formats): - return 0 - else: - return -1 - - -def write_file_to_storage(file_object, content_type, path): - """ - write file_object to the default_storage at given path - """ - file_s3 = ContentFile(file_object) - file_s3.content_type = content_type - - default_storage.save(path, file_s3) - return file_s3 - - -# generator for editions to add mobi to -# campaigns that can have mobi files but don't yet. 
- -def editions_to_convert(): - for campaign in Campaign.objects.filter(edition__ebooks__isnull=False).distinct(): - # need to make sure campaign type is not B2U because kindlegen is for books we give awy free of charge - if (edition_mobi_status(campaign.edition) == 0) and (campaign.type != parameters.BUY2UNGLUE): # possible to generate mobi - yield campaign.edition - - -def generate_mobi_ebook_for_edition(edition): - - # pull out the sister edition to convert from - sister_ebook = edition.ebooks.filter(format__in=['epub', 'html'])[0] - - # run the conversion process - - output = convert_to_mobi(sister_ebook.url) - #output = open("/Users/raymondyee/Downloads/hello.mobi").read() - - file_ = write_file_to_storage(output, - "application/x-mobipocket-ebook", - "/ebf/{0}.mobi".format(uuid.uuid4().get_hex())) - - # create a path for the ebookfile: IS THIS NECESSARY? - # https://github.com/Gluejar/regluit/blob/25dcb06f464dc11b5e589ab6859dfcc487f8f3ef/core/models.py#L1771 - - #ebfile = EbookFile(edition=edition, file=file_, format='mobi') - #ebfile.save() - - # maybe need to create an ebook pointing to ebookFile ? 
- # copy metadata from sister ebook - - ebfile_url = default_storage.url(file_.name) - #print (ebfile_url) - - ebook = Ebook(url=ebfile_url, - format="mobi", - provider="Unglue.it", - rights=sister_ebook.rights, - edition=edition) - ebook.save() - - return ebook diff --git a/core/models/__init__.py b/core/models/__init__.py index 866f0666e..c3fa44a1e 100755 --- a/core/models/__init__.py +++ b/core/models/__init__.py @@ -4,11 +4,11 @@ import math import random import re -import urllib -import urllib2 +from urllib.parse import urlencode, quote_plus +from urllib.request import urlopen from datetime import timedelta, datetime from decimal import Decimal -from tempfile import SpooledTemporaryFile +from tempfile import TemporaryFile import requests from ckeditor.fields import RichTextField @@ -22,11 +22,11 @@ from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.contrib.contenttypes.fields import GenericRelation -from django.core.urlresolvers import reverse from django.core.files.base import ContentFile -from django.db import models +from django.db import models, IntegrityError from django.db.models import F, Q from django.db.models.signals import post_save +from django.urls import reverse from django.utils.timezone import now from django.utils.translation import ugettext_lazy as _ @@ -47,8 +47,6 @@ TRANSACTION_STATUS_FAILED, TRANSACTION_STATUS_INCOMPLETE ) - -from regluit.utils import encryption as crypto from regluit.utils.localdatetime import date_today from regluit.core.parameters import ( @@ -63,10 +61,10 @@ THANKED, OFFER_CHOICES, ACQ_CHOICES, + GOOD_PROVIDERS, ) from regluit.core.epub import personalize, ungluify, ask_epub from regluit.core.pdf import ask_pdf, pdf_append -from regluit.core import mobi from regluit.core.signals import ( successful_campaign, unsuccessful_campaign, @@ -81,7 +79,6 @@ EbookFile, Edition, EditionNote, - good_providers, Identifier, path_for_file, Publisher, @@ -104,32 +101,16 @@ class 
UnglueitError(RuntimeError): pass -class Key(models.Model): - """an encrypted key store""" - name = models.CharField(max_length=255, unique=True) - encrypted_value = models.TextField(null=True, blank=True) - - def _get_value(self): - return crypto.decrypt_string(binascii.a2b_hex(self.encrypted_value), settings.SECRET_KEY) - - def _set_value(self, value): - self.encrypted_value = binascii.b2a_hex(crypto.encrypt_string(value, settings.SECRET_KEY)) - - value = property(_get_value, _set_value) - - def __unicode__(self): - return "Key with name {0}".format(self.name) - class CeleryTask(models.Model): created = models.DateTimeField(auto_now_add=True) task_id = models.CharField(max_length=255) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="tasks", null=True) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="tasks", null=True) description = models.CharField(max_length=2048, null=True) # a description of what the task is function_name = models.CharField(max_length=1024) # used to reconstitute the AsyncTask with which to get status function_args = models.IntegerField(null=True) # not full generalized here -- takes only a single arg for now. 
active = models.NullBooleanField(default=True) - def __unicode__(self): + def __str__(self): return "Task %s arg:%s ID# %s %s: State %s " % (self.function_name, self.function_args, self.task_id, self.description, self.state) @property @@ -158,7 +139,7 @@ class Premium(models.Model): TIERS = {"supporter":25, "patron":50, "bibliophile":100} #should load this from fixture created = models.DateTimeField(auto_now_add=True) type = models.CharField(max_length=2, choices=PREMIUM_TYPES) - campaign = models.ForeignKey("Campaign", related_name="premiums", null=True) + campaign = models.ForeignKey("Campaign", on_delete=models.CASCADE, related_name="premiums", null=True) amount = models.DecimalField(max_digits=10, decimal_places=0, blank=False) description = models.TextField(null=True, blank=False) limit = models.IntegerField(default=0) @@ -171,7 +152,7 @@ def premium_count(self): def premium_remaining(self): t_model = apps.get_model('payment', 'Transaction') return self.limit - t_model.objects.filter(premium=self).count() - def __unicode__(self): + def __str__(self): return (self.campaign.work.title if self.campaign else '') + ' $' + str(self.amount) class PledgeExtra: @@ -190,10 +171,10 @@ class CampaignAction(models.Model): # anticipated types: activated, withdrawn, suspended, restarted, succeeded, failed, unglued type = models.CharField(max_length=15) comment = models.TextField(null=True, blank=True) - campaign = models.ForeignKey("Campaign", related_name="actions", null=False) + campaign = models.ForeignKey("Campaign", on_delete=models.CASCADE, related_name="actions", null=False) class Offer(models.Model): - work = models.ForeignKey("Work", related_name="offers", null=False) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name="offers", null=False) price = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=False) license = models.PositiveSmallIntegerField(null=False, default=INDIVIDUAL, choices=OFFER_CHOICES) @@ -219,26 +200,26 @@ 
class Acq(models.Model): expires = models.DateTimeField(null=True) refreshes = models.DateTimeField(auto_now_add=True) refreshed = models.BooleanField(default=True) - work = models.ForeignKey("Work", related_name='acqs', null=False) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='acqs') + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name='acqs', null=False) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='acqs') license = models.PositiveSmallIntegerField(null=False, default=INDIVIDUAL, choices=ACQ_CHOICES) - watermarked = models.ForeignKey("booxtream.Boox", null=True) + watermarked = models.ForeignKey("booxtream.Boox", on_delete=models.CASCADE, null=True) nonce = models.CharField(max_length=32, null=True) # when the acq is a loan, this points at the library's acq it's derived from - lib_acq = models.ForeignKey("self", related_name="loans", null=True) + lib_acq = models.ForeignKey("self", on_delete=models.CASCADE, related_name="loans", null=True) class mock_ebook(object): def __init__(self, acq): - self.url = acq.get_mobi_url() - self.format = 'mobi' + self.url = acq.get_epub_url() + self.format = 'epub' self.filesize = 0 def save(self): return True def get_archive(self): try: - r = urllib2.urlopen(self.url) + r = urlopen(self.url) try: self.filesize = int(r.info().getheaders("Content-Length")[0]) except IndexError: @@ -252,7 +233,7 @@ def get_archive(self): def ebook(self): return self.mock_ebook(self) - def __unicode__(self): + def __str__(self): if self.lib_acq: return "%s, %s: %s for %s" % (self.work.title, self.get_license_display(), self.lib_acq.user, self.user) else: @@ -265,10 +246,6 @@ def expired(self): else: return self.expires < datetime.now() - def get_mobi_url(self): - if self.expired: - return '' - return self.get_watermarked().download_link_mobi def get_epub_url(self): if self.expired: @@ -300,7 +277,7 @@ def get_watermarked(self): return self.watermarked def 
_hash(self): - return hashlib.md5('%s:%s:%s:%s'%(settings.SOCIAL_AUTH_TWITTER_SECRET, self.user_id, self.work_id, self.created)).hexdigest() + return hashlib.md5(bytes('%s:%s:%s'%(self.user_id, self.work_id, self.created), 'utf-8')).hexdigest() def expire_in(self, delta): self.expires = (now() + delta) if delta else now() @@ -326,7 +303,7 @@ def borrow(self, user=None): borrowed = Acq.objects.create(user=user, work=self.work, license=BORROWED, lib_acq=self) from regluit.core.tasks import watermark_acq notification.send([user], "library_borrow", {'acq':borrowed}) - watermark_acq.delay(borrowed) + watermark_acq.delay(borrowed.id) result = borrowed from regluit.core.tasks import emit_notifications emit_notifications.delay() @@ -359,11 +336,11 @@ def config_acq(sender, instance, created, **kwargs): class Hold(models.Model): created = models.DateTimeField(auto_now_add=True) - work = models.ForeignKey("Work", related_name='holds', null=False) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='holds', null=False) - library = models.ForeignKey(Library, related_name='holds', null=False) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name='holds', null=False) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='holds', null=False) + library = models.ForeignKey(Library, on_delete=models.CASCADE, related_name='holds', null=False) - def __unicode__(self): + def __str__(self): return '%s for %s at %s' % (self.work, self.user.username, self.library) def ahead(self): return Hold.objects.filter(work=self.work, library=self.library, created__lt=self.created).count() @@ -391,8 +368,8 @@ class Campaign(models.Model): activated = models.DateTimeField(null=True, db_index=True,) paypal_receiver = models.CharField(max_length=100, blank=True) amazon_receiver = models.CharField(max_length=100, blank=True) - work = models.ForeignKey("Work", related_name="campaigns", null=False) - managers = 
models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="campaigns", null=False) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name="campaigns", null=False) + managers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="campaigns") # status: INITIALIZED, ACTIVE, SUSPENDED, WITHDRAWN, SUCCESSFUL, UNSUCCESSFUL status = models.CharField( max_length=15, null=True, blank=False, default="INITIALIZED", db_index=True, choices=STATUS_CHOICES) @@ -401,9 +378,9 @@ class Campaign(models.Model): (BUY2UNGLUE, 'Buy-to-unglue campaign'), (THANKS, 'Thanks-for-ungluing campaign'), )) - edition = models.ForeignKey("Edition", related_name="campaigns", null=True) + edition = models.ForeignKey("Edition", on_delete=models.CASCADE, related_name="campaigns", null=True) email = models.CharField(max_length=100, blank=True) - publisher = models.ForeignKey("Publisher", related_name="campaigns", null=True) + publisher = models.ForeignKey("Publisher", on_delete=models.CASCADE, related_name="campaigns", null=True) do_watermark = models.BooleanField(default=True) use_add_ask = models.BooleanField(default=True) charitable = models.BooleanField(default=False) @@ -412,7 +389,7 @@ def __init__(self, *args, **kwargs): self.problems = [] super(Campaign, self).__init__(*args, **kwargs) - def __unicode__(self): + def __str__(self): try: return u"Campaign for %s" % self.work.title except: @@ -443,7 +420,7 @@ def clone(self): self.activated = None self.update_left() self.save() - self.managers = old_managers + self.managers.set(old_managers) # clone associated premiums for premium in new_premiums: @@ -492,10 +469,10 @@ def launchable(self): self.problems.append(_('A campaign must have a target')) may_launch = False if self.type == BUY2UNGLUE: - if self.work.offers.filter(price__gt=0, active=True).count() == 0: + if not self.work.offers.filter(price__gt=0, active=True).exists(): self.problems.append(_('You can\'t launch a buy-to-unglue campaign before setting a price 
for your ebooks')) may_launch = False - if EbookFile.objects.filter(edition__work=self.work).count() == 0: + if not EbookFile.objects.filter(edition__work=self.work).exists(): self.problems.append(_('You can\'t launch a buy-to-unglue campaign if you don\'t have any ebook files uploaded')) may_launch = False if (self.cc_date_initial is None) or (self.cc_date_initial > datetime.combine(settings.MAX_CC_DATE, datetime.min.time())) or (self.cc_date_initial < now()): @@ -510,7 +487,7 @@ def launchable(self): may_launch = False if self.type == THANKS: # the case in which there is no EbookFile and no Ebook associated with work (We have ebooks without ebook files.) - if EbookFile.objects.filter(edition__work=self.work).count() == 0 and self.work.ebooks().count() == 0: + if not EbookFile.objects.filter(edition__work=self.work).exists() and not self.work.ebooks().exists(): self.problems.append(_('You can\'t launch a thanks-for-ungluing campaign if you don\'t have any ebook files uploaded')) may_launch = False except Exception as e: @@ -640,10 +617,10 @@ def activate(self): raise UnglueitError(_('Campaign needs to be initialized in order to be activated')) try: active_claim = self.work.claim.filter(status="active")[0] - except IndexError, e: + except IndexError as e: raise UnglueitError(_('Campaign needs to have an active claim in order to be activated')) if not self.launchable: - raise UnglueitError('Configuration issues need to be addressed before campaign is activated: %s' % unicode(self.problems[0])) + raise UnglueitError('Configuration issues need to be addressed before campaign is activated: %s' % str(self.problems[0])) self.status = 'ACTIVE' self.left = self.target self.activated = datetime.today() @@ -721,16 +698,13 @@ def transaction_to_recharge(self, user): # only if a campaign is SUCCESSFUL, we allow for recharged if self.status == 'SUCCESSFUL': - if self.transaction_set.filter(Q(user=user) & (Q(status=TRANSACTION_STATUS_COMPLETE) | 
Q(status=TRANSACTION_STATUS_ACTIVE))).count(): + if self.transaction_set.filter(Q(user=user) & (Q(status=TRANSACTION_STATUS_COMPLETE) | Q(status=TRANSACTION_STATUS_ACTIVE))).exists(): # presence of an active or complete transaction means no transaction to recharge return None else: transactions = self.transaction_set.filter(Q(user=user) & (Q(status=TRANSACTION_STATUS_ERROR) | Q(status=TRANSACTION_STATUS_FAILED))) # assumption --that the first failed/errored transaction has the amount we need to recharge - if transactions.count(): - return transactions[0] - else: - return None + return transactions.first() else: return None @@ -896,9 +870,9 @@ def countdown(self): if time_remaining.days: countdown = "%s days" % str(time_remaining.days + 1) elif time_remaining.seconds > 3600: - countdown = "%s hours" % str(time_remaining.seconds/3600 + 1) + countdown = "%s hours" % str(time_remaining.seconds // 3600 + 1) elif time_remaining.seconds > 60: - countdown = "%s minutes" % str(time_remaining.seconds/60 + 1) + countdown = "%s minutes" % str(time_remaining.seconds // 60 + 1) else: countdown = "Seconds" @@ -908,17 +882,6 @@ def countdown(self): def latest_ending(cls): return timedelta(days=int(settings.UNGLUEIT_LONGEST_DEADLINE)) + now() - def make_mobis(self): - # make archive files for ebooks, make mobi files for epubs - versions = set() - for ebook in self.work.ebooks().filter(provider__in=good_providers, format='mobi'): - versions.add(ebook.version_label) - for ebook in self.work.ebooks_all().exclude(provider='Unglue.it').filter(provider__in=good_providers, format='epub'): - if not ebook.version_label in versions: - # now make the mobi file - ebf = ebook.get_archive_ebf() - ebf.make_mobi() - def add_ask_to_ebfs(self, position=0): if not self.use_add_ask or self.type != THANKS: return @@ -930,7 +893,7 @@ def add_ask_to_ebfs(self, position=0): ebf.file.open() to_dos.append({'content': ebf.file.read(), 'ebook': ebf.ebook}) format_versions.append(format_version) - for ebook in 
self.work.ebooks_all().exclude(provider='Unglue.it').filter(provider__in=good_providers): + for ebook in self.work.ebooks_all().exclude(provider='Unglue.it').filter(provider__in=GOOD_PROVIDERS): format_version = '{}_{}'.format(ebook.format, ebook.version_label) if ebook.format in ('pdf', 'epub') and not format_version in format_versions: to_dos.append({'content': ebook.get_archive().read(), 'ebook': ebook}) @@ -942,8 +905,8 @@ def add_ask_to_ebfs(self, position=0): if to_do['ebook'].format == 'pdf': try: added = ask_pdf({'campaign':self, 'work':self.work, 'site':Site.objects.get_current()}) - new_file = SpooledTemporaryFile() - old_file = SpooledTemporaryFile() + new_file = TemporaryFile() + old_file = TemporaryFile() old_file.write(to_do['content']) if position == 0: pdf_append(added, old_file, new_file) @@ -959,7 +922,7 @@ def add_ask_to_ebfs(self, position=0): logger.error("error appending pdf ask %s" % (e)) elif to_do['ebook'].format == 'epub': try: - old_file = SpooledTemporaryFile() + old_file = TemporaryFile() old_file.write(to_do['content']) new_file = ask_epub(old_file, {'campaign':self, 'work':self.work, 'site':Site.objects.get_current()}) new_file.seek(0) @@ -969,17 +932,6 @@ def add_ask_to_ebfs(self, position=0): new_epub_ebf.version = version new_ebfs.append(new_epub_ebf) - # now make the mobi file - new_mobi_ebf = EbookFile.objects.create(edition=edition, format='mobi', asking=True) - try: - new_mobi_file = ContentFile(mobi.convert_to_mobi(new_epub_ebf.file.url)) - except Exception as e: - logger.error("error making mobi for %s" % (new_epub_ebf.file.url)) - raise e - new_mobi_ebf.file.save(path_for_file('ebf', None), new_mobi_file) - new_mobi_ebf.save() - new_mobi_ebf.version = version - new_ebfs.append(new_mobi_ebf) except Exception as e: logger.error("error making epub ask or mobi %s" % (e)) for ebf in new_ebfs: @@ -1024,7 +976,7 @@ def revert_asks(self): format_versions.append(format_version) def make_unglued_ebf(self, format, watermarked): - r = 
urllib2.urlopen(watermarked.download_link(format)) + r = urlopen(watermarked.download_link(format)) ebf = EbookFile.objects.create(edition=self.work.preferred_edition, format=format) ebf.file.save(path_for_file(ebf, None), ContentFile(r.read())) ebf.file.close() @@ -1035,7 +987,8 @@ def make_unglued_ebf(self, format, watermarked): rights=self.license, provider="Unglue.it", url=settings.BASE_URL_SECURE + reverse('download_campaign', args=[self.work_id, format]), - version='unglued', + version_label='unglued', + filesize=ebf.file.size, ) old_ebooks = Ebook.objects.exclude(pk=ebook.pk).filter( edition=self.work.preferred_edition, @@ -1064,10 +1017,9 @@ def watermark_success(self): 'epub': True, } ungluified = ungluify(self.work.epubfiles()[0].file, self) - ungluified.filename.seek(0) - watermarked = watermarker.platform(epubfile=ungluified.filename, **params) + ungluified.file_obj.seek(0) + watermarked = watermarker.platform(epubfile=ungluified.file_obj, **params) self.make_unglued_ebf('epub', watermarked) - self.make_unglued_ebf('mobi', watermarked) return True return False @@ -1085,17 +1037,21 @@ def marc_records(self): class Wishlist(models.Model): created = models.DateTimeField(auto_now_add=True) - user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='wishlist') + user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='wishlist') works = models.ManyToManyField('Work', related_name='wishlists', through='Wishes') - def __unicode__(self): + def __str__(self): return "%s's Books" % self.user.username def add_work(self, work, source, notify=False): try: w = Wishes.objects.get(wishlist=self, work=work) - except: - Wishes.objects.create(source=source, wishlist=self, work=work) + except Wishes.DoesNotExist: + try: + Wishes.objects.create(source=source, wishlist=self, work=work) + except IntegrityError: + # threading issue? 
+ return work.update_num_wishes() # only send notification in case of new wishes # and only when they result from user action, not (e.g.) our tests @@ -1118,8 +1074,8 @@ def work_source(self, work): class Wishes(models.Model): created = models.DateTimeField(auto_now_add=True, db_index=True,) source = models.CharField(max_length=15, blank=True, db_index=True,) - wishlist = models.ForeignKey('Wishlist') - work = models.ForeignKey('Work', related_name='wishes') + wishlist = models.ForeignKey('Wishlist', on_delete=models.CASCADE) + work = models.ForeignKey('Work', on_delete=models.CASCADE, related_name='wishes') class Meta: db_table = 'core_wishlist_works' @@ -1130,7 +1086,7 @@ class Badge(models.Model): @property def path(self): return '/static/images/%s.png' % self.name - def __unicode__(self): + def __str__(self): return self.name def pledger(): @@ -1146,10 +1102,10 @@ def pledger2(): pledger2.instance = None ANONYMOUS_AVATAR = '/static/images/header/avatar.png' -(NO_AVATAR, GRAVATAR, TWITTER, FACEBOOK, UNGLUEITAR) = AVATARS +(NO_AVATAR, GRAVATAR, TWITTER, UNGLUEITAR) = AVATARS class Libpref(models.Model): - user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='libpref') + user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='libpref') marc_link_target = models.CharField( max_length=6, default='UNGLUE', @@ -1159,12 +1115,10 @@ class Libpref(models.Model): class UserProfile(models.Model): created = models.DateTimeField(auto_now_add=True) - user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile') + user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='profile') tagline = models.CharField(max_length=140, blank=True) pic_url = models.URLField(blank=True) home_url = models.URLField(blank=True) - twitter_id = models.CharField(max_length=15, blank=True) - facebook_id = models.BigIntegerField(null=True, blank=True) librarything_id = 
models.CharField(max_length=31, blank=True) badges = models.ManyToManyField('Badge', related_name='holders', blank=True) kindle_email = models.EmailField(max_length=254, blank=True) @@ -1172,25 +1126,17 @@ class UserProfile(models.Model): # keep track of work the user adds works = models.ManyToManyField('Work', related_name='contributors', blank=True) - goodreads_user_id = models.CharField(max_length=32, null=True, blank=True) - goodreads_user_name = models.CharField(max_length=200, null=True, blank=True) - goodreads_auth_token = models.TextField(null=True, blank=True) - goodreads_auth_secret = models.TextField(null=True, blank=True) - goodreads_user_link = models.CharField(max_length=200, null=True, blank=True) - avatar_source = models.PositiveSmallIntegerField( null=True, default=UNGLUEITAR, choices=( (NO_AVATAR, 'No Avatar, Please'), (GRAVATAR, 'Gravatar'), - (TWITTER, 'Twitter'), - (FACEBOOK, 'Facebook'), (UNGLUEITAR, 'Unglueitar'), ) ) - def __unicode__(self): + def __str__(self): return self.user.username def reset_pledge_badge(self): @@ -1206,24 +1152,19 @@ def reset_pledge_badge(self): @property def pledge_count(self): - return self.user.transaction_set.exclude(status='NONE').exclude(status='Canceled', reason=None).exclude(anonymous=True).count() + return self.user.transaction_set.exclude(status='NONE').exclude( + status='Canceled', reason=None).exclude(anonymous=True).count() @property def account(self): # there should be only one active account per user - accounts = self.user.account_set.filter(date_deactivated__isnull=True) - if accounts.count() == 0: - return None - else: - return accounts[0] + return self.user.account_set.filter(date_deactivated__isnull=True).first() @property def old_account(self): - accounts = self.user.account_set.filter(date_deactivated__isnull=False).order_by('-date_deactivated') - if accounts.count() == 0: - return None - else: - return accounts[0] + return self.user.account_set.filter( + date_deactivated__isnull=False + 
).order_by('-date_deactivated').first() @property def pledges(self): @@ -1232,10 +1173,7 @@ def pledges(self): @property def last_transaction(self): from regluit.payment.models import Transaction - try: - return Transaction.objects.filter(user=self.user).order_by('-date_modified')[0] - except IndexError: - return None + return Transaction.objects.filter(user=self.user).order_by('-date_modified').first() @property def ack_name(self): @@ -1252,8 +1190,7 @@ def anon_pref(self): last = self.last_transaction if last: return last.anonymous - else: - return None + return None @property def on_ml(self): @@ -1267,12 +1204,12 @@ def on_ml(self): ) if member['status'] == 'subscribed': return 'True' - except MailChimpError, e: - if e[0]['status'] != 404: # don't log case where user is not on a list + except MailChimpError as e: + if e.args[0]['status'] != 404: # don't log case where user is not on a list logger.error("error getting mailchimp status %s" % (e)) - except ValueError, e: + except ValueError as e: logger.error("bad email address %s" % (self.user.email)) - except Exception, e: + except Exception as e: logger.error("error getting mailchimp status %s" % (e)) return False @@ -1281,7 +1218,7 @@ def ml_subscribe(self, **kwargs): # use @example.org email addresses for testing! 
return from regluit.core.tasks import ml_subscribe_task - ml_subscribe_task.delay(self, **kwargs) + ml_subscribe_task.delay(self.id, **kwargs) def ml_unsubscribe(self): if "@example.org" in self.user.email: @@ -1293,29 +1230,29 @@ def ml_unsubscribe(self): subscriber_hash=self.user.email, ) return True - except MailChimpError, e: - if e[0]['status'] != 404: # don't log case where user is not on a list + except MailChimpError as e: + if e.args[0]['status'] != 404: # don't log case where user is not on a list logger.error("error getting mailchimp status %s" % (e)) - except Exception, e: + except Exception as e: logger.error("error unsubscribing from mailchimp list %s" % (e)) return False def gravatar(self): # construct the url - gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(self.user.email.lower()).hexdigest() + "?" - gravatar_url += urllib.urlencode({'d':'wavatar', 's':'50'}) + gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(bytes(self.user.email.lower(), 'utf-8')).hexdigest() + "?" + gravatar_url += urlencode({'d':'wavatar', 's':'50'}) return gravatar_url def unglueitar(self): # construct the url - gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(urllib.quote_plus(self.user.username.encode('utf-8')) + '@unglue.it').hexdigest() + "?" - gravatar_url += urllib.urlencode({'d':'wavatar', 's':'50'}) + gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(bytes(quote_plus(self.user.username), 'utf-8') + b'@unglue.it').hexdigest() + "?" 
+ gravatar_url += urlencode({'d':'wavatar', 's':'50'}) return gravatar_url @property def avatar_url(self): - if self.avatar_source is None or self.avatar_source is TWITTER: + if self.avatar_source is None: if self.pic_url: return self.pic_url else: @@ -1324,10 +1261,7 @@ def avatar_url(self): return self.unglueitar() elif self.avatar_source == GRAVATAR: return self.gravatar() - elif self.avatar_source == FACEBOOK and self.facebook_id != None: - return 'https://graph.facebook.com/v2.3/' + str(self.facebook_id) + '/picture?redirect=true' - else: - return ANONYMOUS_AVATAR + return ANONYMOUS_AVATAR @property def social_auths(self): @@ -1359,9 +1293,9 @@ class Press(models.Model): class Gift(models.Model): # the acq will contain the recipient, and the work - acq = models.ForeignKey('Acq', related_name='gifts') + acq = models.ForeignKey('Acq', on_delete=models.CASCADE, related_name='gifts') to = models.CharField(max_length=75, blank=True) # store the email address originally sent to, not necessarily the email of the recipient - giver = models.ForeignKey(User, related_name='gifts') + giver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='gifts') message = models.TextField(max_length=512, default='') used = models.DateTimeField(null=True) diff --git a/core/models/bibmodels.py b/core/models/bibmodels.py index f8b0af23b..954a472d3 100644 --- a/core/models/bibmodels.py +++ b/core/models/bibmodels.py @@ -1,39 +1,42 @@ import logging import math import re -import urllib -import urllib2 +import unicodedata import uuid from decimal import Decimal -import unicodedata -from urlparse import urlparse +from ssl import CertificateError +from urllib.parse import urlparse + +import requests -from sorl.thumbnail import get_thumbnail +from botocore.exceptions import ClientError from PIL import ImageFile + from django.conf import settings from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from django.core.files.base 
import ContentFile -from django.core.urlresolvers import reverse +from django.urls import reverse from django.db import models from django.db.models import F -from django.db.models.signals import post_save, pre_delete +from django.db.models.signals import m2m_changed, post_save, pre_delete from django.utils.timezone import now from django_comments.models import Comment import regluit from regluit.marc.models import MARCRecord as NewMARC -from questionnaire.models import Landing -from regluit.core import mobi +from regluit.bisac.models import interpret_notation import regluit.core.cc as cc +from regluit.core.covers import (get_thumbnail, + DEFAULT_COVER, DEFAULT_COVER_LARGE, DEFAULT_COVER_SMALL) from regluit.core.epub import test_epub from regluit.core.links import id_url +from regluit.core.loaders.harvest import dl_online from regluit.core.validation import valid_subject - from regluit.core.parameters import ( AGE_LEVEL_CHOICES, BORROWED, @@ -47,13 +50,14 @@ THANKED, THANKS, WORK_IDENTIFIERS, + DOMAIN_TO_PROVIDER, ) # fix truncated file problems per https://stackoverflow.com/questions/12984426/python-pil-ioerror-image-file-truncated-with-big-images ImageFile.LOAD_TRUNCATED_IMAGES = True logger = logging.getLogger(__name__) -good_providers = ('Internet Archive', 'Unglue.it', 'Github', 'OAPEN Library') +dllogger = logging.getLogger('regluit.downloads') def id_for(obj, type): if not obj.pk: @@ -67,8 +71,8 @@ class Identifier(models.Model): # olib, ltwk, goog, gdrd, thng, isbn, oclc, olwk, doab, gtbg, glue, doi type = models.CharField(max_length=4, null=False) value = models.CharField(max_length=250, null=False) - work = models.ForeignKey("Work", related_name="identifiers", null=False) - edition = models.ForeignKey("Edition", related_name="identifiers", null=True) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name="identifiers", null=False) + edition = models.ForeignKey("Edition", on_delete=models.CASCADE, related_name="identifiers", null=True) 
class Meta: unique_together = ("type", "value") @@ -92,7 +96,7 @@ def set(type=None, value=None, edition=None, work=None): identifier.edition = edition identifier.save() others = Identifier.objects.filter(type=type, work=work, edition=edition).exclude(value=value) - if others.count() > 0: + if others.exists(): for other in others: other.delete() return identifier @@ -106,12 +110,12 @@ def get_or_add(type='goog', value=None, edition=None, work=None): i.save() return i - def __unicode__(self): + def __str__(self): return u'{0}:{1}'.format(self.type, self.value) - + def label(self): return ID_CHOICES_MAP.get(self.type, self.type) - + def url(self): return id_url(self.type, self.value) @@ -122,19 +126,21 @@ class Work(models.Model): openlibrary_lookup = models.DateTimeField(null=True, blank=True) num_wishes = models.IntegerField(default=0, db_index=True) description = models.TextField(default='', null=True, blank=True) - selected_edition = models.ForeignKey("Edition", related_name='selected_works', null=True) + selected_edition = models.ForeignKey("Edition", on_delete=models.CASCADE, related_name='selected_works', null=True) # repurposed earliest_publication to actually be publication range publication_range = models.CharField(max_length=50, null=True, blank=True) featured = models.DateTimeField(null=True, blank=True, db_index=True,) - is_free = models.BooleanField(default=False) - landings = GenericRelation(Landing, related_query_name='works') + is_free = models.BooleanField(default=False, db_index=True) related = models.ManyToManyField('self', symmetrical=False, blank=True, through='WorkRelation', related_name='reverse_related') - age_level = models.CharField(max_length=5, choices=AGE_LEVEL_CHOICES, default='', blank=True) + age_level = models.CharField(max_length=5, choices=AGE_LEVEL_CHOICES, default='', blank=True) class Meta: ordering = ['title'] - - def __unicode__(self): + indexes = [ + models.Index(fields=['is_free', 'title']), + ] + + def __str__(self): return 
self.title def __init__(self, *args, **kwargs): @@ -144,7 +150,7 @@ def __init__(self, *args, **kwargs): def delete(self, cascade=True, *args, **kwargs): if cascade: if self.offers.all() or self.claim.all() or self.campaigns.all() or self.acqs.all() \ - or self.holds.all() or self.landings.all(): + or self.holds.all(): return for wishlist in self.wishlists.all(): wishlist.remove_work(self) @@ -165,9 +171,9 @@ def delete(self, cascade=True, *args, **kwargs): for work_relation in self.works_related_from.all(): work_relation.delete() super(Work, self).delete(*args, **kwargs) # Call the "real" save() method. - + def id_for(self, type): - return id_for(self, type) + return id_for(self, type) # this is NOT recursive! @property def gtbg(self): @@ -233,7 +239,7 @@ def openlibrary_id(self): @property def openlibrary_url(self): return id_url('olwk', self.openlibrary_id) - + def cover_filetype(self): if self.uses_google_cover(): return 'jpeg' @@ -262,12 +268,12 @@ def uses_google_cover(self): def cover_image_large(self): if self.preferred_edition and self.preferred_edition.has_cover_image(): return self.preferred_edition.cover_image_large() - return "/static/images/generic_cover_larger.png" + return DEFAULT_COVER_LARGE def cover_image_small(self): if self.preferred_edition and self.preferred_edition.has_cover_image(): return self.preferred_edition.cover_image_small() - return "/static/images/generic_cover_larger.png" + return DEFAULT_COVER_SMALL def cover_image_thumbnail(self): try: @@ -275,29 +281,29 @@ def cover_image_thumbnail(self): return self.preferred_edition.cover_image_thumbnail() except IndexError: pass - return "/static/images/generic_cover_larger.png" + return DEFAULT_COVER def authors(self): # assumes that they come out in the same order they go in! 
- if self.preferred_edition and self.preferred_edition.authors.all().count() > 0: + if self.preferred_edition and self.preferred_edition.authors.exists(): return self.preferred_edition.authors.all() for edition in self.editions.all(): - if edition.authors.all().count() > 0: + if edition.authors.exists(): return edition.authors.all() return Author.objects.none() def relators(self): # assumes that they come out in the same order they go in! - if self.preferred_edition and self.preferred_edition.relators.all().count() > 0: + if self.preferred_edition and self.preferred_edition.relators.exists(): return self.preferred_edition.relators.all() for edition in self.editions.all(): - if edition.relators.all().count() > 0: + if edition.relators.exists(): return edition.relators.all() return Relator.objects.none() def author(self): # assumes that they come out in the same order they go in! - if self.relators().count() > 0: + if self.relators().exists(): return self.relators()[0].name return '' @@ -329,7 +335,7 @@ def kindle_safe_title(self): nkfd_form = unicodedata.normalize('NFKD', self.title) #unaccent accented letters for c in nkfd_form: ccat = unicodedata.category(c) - #print ccat + if ccat.startswith('L') or ccat.startswith('N'): # only letters and numbers if ord(c) > 127: safe = safe + '#' #a non latin script letter or number @@ -399,9 +405,9 @@ def percent_unglued(self): status = 6 else: if campaign.type == BUY2UNGLUE: - status = int(6 - 6*campaign.left/campaign.target) + status = int(6 - 6 * campaign.left / campaign.target) else: - status = int(float(campaign.current_total)*6/target) + status = int(float(campaign.current_total) * 6 / target) if status >= 6: status = 6 return status @@ -431,14 +437,14 @@ def mobifiles(self): def pdffiles(self): return EbookFile.objects.filter(edition__work=self, format='pdf').exclude(file='').order_by('-created') - + def versions(self): version_labels = [] for ebook in self.ebooks_all(): if ebook.version_label and not 
ebook.version_label in version_labels: version_labels.append(ebook.version_label) return version_labels - + def formats(self): fmts = [] for fmt in ['pdf', 'epub', 'mobi', 'html']: @@ -450,7 +456,7 @@ def formats(self): def remove_old_ebooks(self): # this method is triggered after an file upload or new ebook saved old = Ebook.objects.filter(edition__work=self, active=True).order_by('-version_iter', '-created') - + # keep highest version ebook for each format and version label done_format_versions = [] for eb in old: @@ -459,7 +465,7 @@ def remove_old_ebooks(self): eb.deactivate() else: done_format_versions.append(format_version) - + # check for failed uploads. null_files = EbookFile.objects.filter(edition__work=self, file='') for ebf in null_files: @@ -512,11 +518,8 @@ def update_num_wishes(self): self.save() def priority(self): - if self.last_campaign(): - return 5 - freedom = 1 if self.is_free else 0 wishing = int(math.log(self.num_wishes)) + 1 if self.num_wishes else 0 - return min(freedom + wishing, 5) + return min(1 + wishing, 5) def first_oclc(self): if self.preferred_edition is None: @@ -612,7 +615,7 @@ def get_lib_license(self, user): return self.get_user_license(lib_user) def borrowable(self, user): - if user.is_anonymous(): + if user.is_anonymous: return False lib_license = self.get_lib_license(user) if lib_license and lib_license.borrowable: @@ -620,7 +623,7 @@ def borrowable(self, user): return False def lib_thanked(self, user): - if user.is_anonymous(): + if user.is_anonymous: return False lib_license = self.get_lib_license(user) if lib_license and lib_license.thanked: @@ -628,10 +631,10 @@ def lib_thanked(self, user): return False def in_library(self, user): - if user.is_anonymous(): + if user.is_anonymous: return False lib_license = self.get_lib_license(user) - if lib_license and lib_license.acqs.count(): + if lib_license and lib_license.acqs.exists(): return True return False @@ -650,23 +653,15 @@ def __init__(self, acqs): @property def 
is_active(self): - return self.acqs.filter(expires__isnull=True).count() > 0 or self.acqs.filter(expires__gt=now()).count() > 0 + return self.acqs.filter(expires__isnull=True).exists() or self.acqs.filter(expires__gt=now()).exists() @property def borrowed(self): - loans = self.acqs.filter(license=BORROWED, expires__gt=now()) - if loans.count() == 0: - return None - else: - return loans[0] + return self.acqs.filter(license=BORROWED, expires__gt=now()).first() @property def purchased(self): - purchases = self.acqs.filter(license=INDIVIDUAL, expires__isnull=True) - if purchases.count() == 0: - return None - else: - return purchases[0] + return self.acqs.filter(license=INDIVIDUAL, expires__isnull=True).first() @property def lib_acqs(self): @@ -675,24 +670,20 @@ def lib_acqs(self): @property def next_acq(self): """ This is the next available copy in the user's libraries""" - loans = self.acqs.filter(license=LIBRARY, refreshes__gt=now()).order_by('refreshes') - if loans.count() == 0: - return None - else: - return loans[0] + return self.acqs.filter(license=LIBRARY, + refreshes__gt=now()).order_by('refreshes').first() @property def borrowable(self): - return self.acqs.filter(license=LIBRARY, refreshes__lt=now()).count() > 0 + return self.acqs.filter(license=LIBRARY, refreshes__lt=now()).exists() @property def thanked(self): - return self.acqs.filter(license=THANKED).count() > 0 + return self.acqs.filter(license=THANKED).exists() @property def borrowable_acq(self): - for acq in self.acqs.filter(license=LIBRARY, refreshes__lt=now()): - return acq + return self.acqs.filter(license=LIBRARY, refreshes__lt=now()).first() @property def is_duplicate(self): @@ -706,7 +697,7 @@ def get_user_license(self, user): if user is None: return None if hasattr(user, 'is_anonymous'): - if user.is_anonymous(): + if user.is_anonymous: return None return self.user_license(self.acqs.filter(user=user)) else: @@ -732,8 +723,8 @@ def marc_records(self): return record_list class 
WorkRelation(models.Model): - to_work = models.ForeignKey('Work', related_name='works_related_to') - from_work= models.ForeignKey('Work', related_name='works_related_from') + to_work = models.ForeignKey('Work', on_delete=models.CASCADE, related_name='works_related_to') + from_work= models.ForeignKey('Work', on_delete=models.CASCADE, related_name='works_related_from') relation = models.CharField(max_length=15, choices=TEXT_RELATION_CHOICES) @@ -742,7 +733,12 @@ class Author(models.Model): name = models.CharField(max_length=255, unique=True) editions = models.ManyToManyField("Edition", related_name="authors", through="Relator") - def __unicode__(self): + class Meta: + indexes = [ + models.Index(fields=['name']), + ] + + def __str__(self): return self.name @property @@ -766,9 +762,9 @@ class Relation(models.Model): name = models.CharField(max_length=30, blank=True,) class Relator(models.Model): - relation = models.ForeignKey('Relation', default=1) #first relation should have code='aut' - author = models.ForeignKey('Author') - edition = models.ForeignKey('Edition', related_name='relators') + relation = models.ForeignKey('Relation', on_delete=models.CASCADE, default=1) #first relation should have code='aut' + author = models.ForeignKey('Author', on_delete=models.CASCADE) + edition = models.ForeignKey('Edition', on_delete=models.CASCADE, related_name='relators') class Meta: db_table = 'core_author_editions' @@ -787,28 +783,34 @@ def set(self, relation_code): except Relation.DoesNotExist: logger.warning("relation not found: code = %s" % relation_code) +AUTHMATCH = re.compile(r'\s*!([a-z]+):?\s+(.*)') + class Subject(models.Model): created = models.DateTimeField(auto_now_add=True) - name = models.CharField(max_length=200, unique=True) + name = models.CharField(max_length=200, unique=True, db_index=True) works = models.ManyToManyField("Work", related_name="subjects") is_visible = models.BooleanField(default=True) authority = models.CharField(max_length=10, blank=False, 
default="") + num_free = models.IntegerField(default=0) class Meta: ordering = ['name'] + indexes = [ + models.Index(fields=['name']), + ] @classmethod def set_by_name(cls, subject, work=None, authority=None): ''' use this method whenever you would be creating a new subject!''' subject = subject.strip() - + # make sure it's not a ; delineated list subjects = subject.split(';') for additional_subject in subjects[1:]: cls.set_by_name(additional_subject, work, authority) subject = subjects[0] # make sure there's no heading - headingmatch = re.match(r'^!(.+):(.+)', subject) + headingmatch = AUTHMATCH.match(subject) if headingmatch: subject = headingmatch.group(2).strip() authority = headingmatch.group(1).strip() @@ -820,19 +822,20 @@ def set_by_name(cls, subject, work=None, authority=None): subject = subject[6:].split('=')[0].replace('_', ' ').strip().capitalize() subject = 'Award Winner - {}'.format(subject) authority = 'award' - + if authority == 'bisacsh': + subject = interpret_notation(subject) if valid_subject(subject): (subject_obj, created) = cls.objects.get_or_create(name=subject) if not subject_obj.authority and authority: subject_obj.authority = authority - subject_obj.save() - + subject_obj.works.add(work) - return subject_obj + subject_obj.count_free() + return subject_obj else: return None - - def __unicode__(self): + + def __str__(self): return self.name @@ -842,18 +845,29 @@ def kw(self): def free_works(self): return self.works.filter(is_free=True) + + def count_free(self, force=False): + if self.is_visible or force: + self.num_free = self.works.filter(is_free=True).count() + self.save() + class Edition(models.Model): created = models.DateTimeField(auto_now_add=True) title = models.CharField(max_length=1000) - publisher_name = models.ForeignKey("PublisherName", related_name="editions", null=True, blank=True) + publisher_name = models.ForeignKey("PublisherName", on_delete=models.CASCADE, related_name="editions", null=True, blank=True) publication_date = 
models.CharField(max_length=50, null=True, blank=True, db_index=True) - work = models.ForeignKey("Work", related_name="editions", null=True) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name="editions", null=True) cover_image = models.URLField(null=True, blank=True) unglued = models.BooleanField(default=False) - note = models.ForeignKey("EditionNote", null=True, blank=True) + note = models.ForeignKey("EditionNote", on_delete=models.CASCADE, null=True, blank=True) + + class Meta: + indexes = [ + models.Index(fields=['work']), + ] - def __unicode__(self): + def __str__(self): if self.isbn_13: return "%s (ISBN %s) %s" % (self.title, self.isbn_13, self.publisher) if self.oclc: @@ -867,42 +881,37 @@ def cover_image_large(self): #550 pixel high image if self.cover_image: im = get_thumbnail(self.cover_image, 'x550', crop='noop', quality=95) - if im.exists(): + if not im.is_default: return im.url - elif self.googlebooks_id: + if self.googlebooks_id: url = "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=0" % self.googlebooks_id im = get_thumbnail(url, 'x550', crop='noop', quality=95) - if not im.exists() or im.storage.size(im.name) == 16392: # check for "image not available" image + if im.is_default or im.storage.size(im.name) == 16392: # check for "image not available" image url = "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=1" % self.googlebooks_id im = get_thumbnail(url, 'x550', crop='noop', quality=95) - if im.exists(): + if not im.is_default: return im.url - else: - return '' - else: - return '' + return DEFAULT_COVER_LARGE def cover_image_small(self): #80 pixel high image if self.cover_image: im = get_thumbnail(self.cover_image, 'x80', crop='noop', quality=95) - if im.exists(): + if not im.is_default: return im.url if self.googlebooks_id: return "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=5" % self.googlebooks_id - else: - return '' + return DEFAULT_COVER_SMALL 
def cover_image_thumbnail(self): #128 pixel wide image if self.cover_image: im = get_thumbnail(self.cover_image, '128', crop='noop', quality=95) - if im.exists(): + if not im.is_default: return im.url if self.googlebooks_id: return "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=1" % self.googlebooks_id - else: - return '' + return DEFAULT_COVER def has_cover_image(self): if self.cover_image: @@ -999,18 +1008,18 @@ def authnames(self): @property def license(self): try: - return self.ebooks.all()[0].rights + return self.ebooks.first().rights except: return None @property def funding_info(self): - if self.ebooks.all().count() == 0: + if not self.ebooks.exists(): return '' if self.unglued: return 'The book is available as a free download thanks to the generous support of interested readers and organizations, who made donations using the crowd-funding website Unglue.it.' else: - if self.ebooks.all()[0].rights in cc.LICENSE_LIST: + if self.ebooks.first().rights in cc.LICENSE_LIST: return 'The book is available as a free download thanks to a Creative Commons license.' else: return 'The book is available as a free download because it is in the Public Domain.' 
@@ -1021,25 +1030,25 @@ def description(self): class EditionNote(models.Model): note = models.CharField(max_length=64, null=True, blank=True, unique=True) - def __unicode__(self): + def __str__(self): return self.note class Publisher(models.Model): created = models.DateTimeField(auto_now_add=True) - name = models.ForeignKey('PublisherName', related_name='key_publisher') + name = models.ForeignKey('PublisherName', on_delete=models.CASCADE, related_name='key_publisher') url = models.URLField(max_length=1024, null=True, blank=True) logo_url = models.URLField(max_length=1024, null=True, blank=True) description = models.TextField(default='', null=True, blank=True) - def __unicode__(self): + def __str__(self): return self.name.name class PublisherName(models.Model): name = models.CharField(max_length=255, blank=False, unique=True) - publisher = models.ForeignKey('Publisher', related_name='alternate_names', null=True) + publisher = models.ForeignKey('Publisher', on_delete=models.CASCADE, related_name='alternate_names', null=True) - def __unicode__(self): + def __str__(self): return self.name def save(self, *args, **kwargs): @@ -1052,17 +1061,17 @@ def save(self, *args, **kwargs): class WasWork(models.Model): - work = models.ForeignKey('Work') + work = models.ForeignKey('Work', on_delete=models.CASCADE) was = models.IntegerField(unique=True) moved = models.DateTimeField(auto_now_add=True) - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def safe_get_work(work_id): """ use this rather than querying the db directly for a work by id """ try: - work = Work.objects.get(id=work_id) + work = Work.objects.select_related('selected_edition').get(id=work_id) except Work.DoesNotExist: try: work = WasWork.objects.get(was=work_id).work @@ -1074,17 +1083,16 @@ def safe_get_work(work_id): return work def path_for_file(instance, filename): - return 
"ebf/{}.{}".format(uuid.uuid4().get_hex(), instance.format) + return f"ebf/{uuid.uuid4().hex}.{instance.format}" class EbookFile(models.Model): file = models.FileField(upload_to=path_for_file) format = models.CharField(max_length=25, choices=settings.FORMATS) - edition = models.ForeignKey('Edition', related_name='ebook_files') + edition = models.ForeignKey('Edition', on_delete=models.CASCADE, related_name='ebook_files') created = models.DateTimeField(auto_now_add=True) asking = models.BooleanField(default=False) - ebook = models.ForeignKey('Ebook', related_name='ebook_files', null=True) - source = models.URLField(null=True, blank=True) - mobied = models.IntegerField(default=0) #-1 indicates a failed conversion attempt + ebook = models.ForeignKey('Ebook', on_delete=models.CASCADE, related_name='ebook_files', null=True) + source = models.URLField(max_length=1024, null=True, blank=True) version = None def check_file(self): if self.format == 'epub': @@ -1098,47 +1106,14 @@ def active(self): except: return False - def make_mobi(self): - if not self.format == 'epub' or not settings.MOBIGEN_URL: - return False - if self.mobied < 0: - return False - try: - mobi_cf = ContentFile(mobi.convert_to_mobi(self.file.url)) - except: - self.mobied = -1 - self.save() - return False - new_mobi_ebf = EbookFile.objects.create( - edition=self.edition, - format='mobi', - asking=self.asking, - source=self.file.url - ) - new_mobi_ebf.file.save(path_for_file(new_mobi_ebf, None), mobi_cf) - new_mobi_ebf.save() - if self.ebook: - new_ebook = Ebook.objects.create( - edition=self.edition, - format='mobi', - provider='Unglue.it', - url=new_mobi_ebf.file.url, - rights=self.ebook.rights, - version_label=self.ebook.version_label, - version_iter=self.ebook.version_iter, - ) - new_mobi_ebf.ebook = new_ebook - new_mobi_ebf.save() - self.mobied = 1 - self.save() - return True - send_to_kindle_limit = 7492232 class Ebook(models.Model): url = models.URLField(max_length=1024) #change to unique? 
created = models.DateTimeField(auto_now_add=True, db_index=True,) - format = models.CharField(max_length=25, choices=settings.FORMATS, blank=False) + format = models.CharField(max_length=25, + choices=settings.FORMATS + (('online', 'Online Only'),), + blank=False) provider = models.CharField(max_length=255) download_count = models.IntegerField(default=0) active = models.BooleanField(default=True) @@ -1148,8 +1123,8 @@ class Ebook(models.Model): # use 'PD-US', 'CC BY', 'CC BY-NC-SA', 'CC BY-NC-ND', 'CC BY-NC', 'CC BY-ND', 'CC BY-SA', 'CC0' rights = models.CharField(max_length=255, null=True, choices=cc.CHOICES, db_index=True) - edition = models.ForeignKey('Edition', related_name='ebooks') - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) + edition = models.ForeignKey('Edition', on_delete=models.CASCADE, related_name='ebooks') + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def kindle_sendable(self): if not self.filesize or self.filesize < send_to_kindle_limit: @@ -1172,37 +1147,15 @@ def get_archive(self): # returns an open file return ebf.file def get_archive_ebf(self): # returns an ebf - if not self.ebook_files.filter(asking=False): - if not self.provider in good_providers: - return None - try: - r = urllib2.urlopen(self.url) - try: - self.filesize = int(r.info().getheaders("Content-Length")[0]) - if self.save: - self.filesize = self.filesize if self.filesize < 2147483647 else 2147483647 # largest safe positive integer - self.save() - ebf = EbookFile.objects.create( - edition=self.edition, - ebook=self, - format=self.format, - source=self.url - ) - ebf.file.save(path_for_file(ebf, None), ContentFile(r.read())) - ebf.file.close() - ebf.save() - return ebf - except IndexError: - # response has no Content-Length header probably a bad link - logging.error('Bad link error: {}'.format(self.url)) - except IOError: - logger.error(u'could not open {}'.format(self.url)) + if self.ebook_files.filter(asking=False): + ebf 
= self.ebook_files.filter(asking=False).last() + elif EbookFile.objects.filter(source=self.url, format=self.format): + ebf = self.ebook_files.filter(asking=False).last() else: - ebf = self.ebook_files.filter(asking=False).order_by('-created')[0] - if not self.filesize: - self.filesize = ebf.file.size - self.save() - return ebf + ebf, num = dl_online(self, format=self.format, force=True) + if not ebf: + return None + return ebf def set_provider(self): self.provider = Ebook.infer_provider(self.url) @@ -1214,7 +1167,7 @@ def version(self): return '.{}'.format(self.version_iter) else: return '().{}'.format(self.version_label, self.version_iter) - + def set_version(self, version): #set both version_label and version_iter with one string with format "version.iter" version_pattern = r'(.*)\.(\d+)$' @@ -1224,11 +1177,11 @@ def set_version(self, version): else: self.version_label = version self.save() - + def set_next_iter(self): # set the version iter to the next unused iter for that version for ebook in Ebook.objects.filter( - edition=self.edition, + edition=self.edition, version_label=self.version_label, format=self.format, provider=self.provider @@ -1237,7 +1190,7 @@ def set_next_iter(self): break self.version_iter = iter + 1 self.save() - + @property def rights_badge(self): if self.rights is None: @@ -1266,11 +1219,21 @@ def infer_provider(url): elif re.match(r'https?://www\.oapen\.org/download', url): provider = 'OAPEN Library' else: - provider = None + netloc = urlparse(url).netloc.lower() + if netloc in [u'dx.doi.org', u'doi.org', u'hdl.handle.net']: + try: + url = requests.get(url).url + except requests.exceptions.SSLError: + url = requests.get(url, verify=False).url + netloc = urlparse(url).netloc + if netloc.startswith('www.'): + netloc = netloc[4:] + provider = DOMAIN_TO_PROVIDER.get(netloc, netloc) return provider def increment(self): - Ebook.objects.filter(id=self.id).update(download_count=F('download_count') +1) + 
#Ebook.objects.filter(id=self.id).update(download_count=F('download_count') +1) + dllogger.info(f'{self.id}') @property def download_url(self): @@ -1279,7 +1242,7 @@ def download_url(self): def is_direct(self): return self.provider not in ('Google Books', 'Project Gutenberg') - def __unicode__(self): + def __str__(self): return "%s (%s from %s)" % (self.edition.title, self.format, self.provider) def deactivate(self): @@ -1295,20 +1258,42 @@ def set_free_flag(sender, instance, created, **kwargs): if not instance.edition.work.is_free and instance.active: instance.edition.work.is_free = True instance.edition.work.save() - elif not instance.active and instance.edition.work.is_free and instance.edition.work.ebooks().count() == 0: + for subject in instance.edition.work.subjects.all(): + subject.count_free() + elif not instance.active and instance.edition.work.is_free and not instance.edition.work.ebooks().exists(): instance.edition.work.is_free = False instance.edition.work.save() - elif instance.active and not instance.edition.work.is_free and instance.edition.work.ebooks().count() > 0: + for subject in instance.edition.work.subjects.all(): + subject.count_free() + + elif instance.active and not instance.edition.work.is_free and instance.edition.work.ebooks().exists(): instance.edition.work.is_free = True instance.edition.work.save() + for subject in instance.edition.work.subjects.all(): + subject.count_free() post_save.connect(set_free_flag, sender=Ebook) def reset_free_flag(sender, instance, **kwargs): # if the Work associated with the instance Ebook currenly has only 1 Ebook, then it's no longer a free Work # once the instance Ebook is deleted. 
- if instance.edition.work.ebooks().count() == 1: + if instance.active and instance.edition.work.ebooks().count() == 1: instance.edition.work.is_free = False instance.edition.work.save() + for subject in instance.edition.work.subjects.all(): + if subject.num_free > 0: + Subject.objects.filter(id=subject.id).update(num_free=F('num_free') - 1) pre_delete.connect(reset_free_flag, sender=Ebook) + +def check_free(sender, instance, action, model, pk_set, reverse, **kwargs): + if action in ['post_add', 'post_delete']: + if reverse: + for pk in pk_set: + subject = model.objects.get(pk=pk) + subject.count_free() + else: + instance.count_free() + +m2m_changed.connect(check_free, sender=Work.subjects.through) +m2m_changed.connect(check_free, sender=Subject.works.through) diff --git a/core/models/loader.py b/core/models/loader.py new file mode 100644 index 000000000..5256e6cb9 --- /dev/null +++ b/core/models/loader.py @@ -0,0 +1,186 @@ +import logging +import re +import requests +import time +from urllib.parse import quote, unquote, urlparse, urlsplit, urlunsplit + +from django.apps import apps +from django.conf import settings +from django.core.files.base import ContentFile +from django.forms import ValidationError + +from regluit.core.validation import test_file +from regluit.core import models +#from . 
import Ebook, EbookFile + +#Ebook = apps.get_model('core', 'Ebook') +#EbookFile = apps.get_model('core', 'EbookFile') + +logger = logging.getLogger(__name__) + +def type_for_url(url, content_type=None, force=False, disposition=''): + url_disp = url + disposition + if not url: + return '' + + # check to see if we already know + for ebook in models.Ebook.objects.filter(url=url): + if ebook.format != 'online': + return ebook.format + + if not force: + if url.find('books.openedition.org') >= 0: + return 'online' + if content_type: + ct = content_type + else: + ct, disposition = contenttyper.calc_type(url) + url_disp = url + disposition + binary_type = re.search("octet-stream", ct) or re.search("application/binary", ct) + if re.search("pdf", ct): + return "pdf" + elif binary_type and re.search("pdf", url_disp, flags=re.I): + return "pdf" + elif binary_type and re.search("epub", url_disp, flags=re.I): + return "epub" + elif binary_type and re.search("mobi", url_disp, flags=re.I): + return "mobi" + elif re.search("text/plain", ct): + return "text" + elif re.search("text/html", ct): + if url.find('oapen.org/view') >= 0: + return "html" + return "online" + elif re.search("epub", ct): + return "epub" + elif re.search("mobi", ct): + return "mobi" + elif ct == '404': + return ct + # no content-type header! 
+ elif ct == '' and re.search("epub", url_disp, flags=re.I): + return "epub" + elif ct == '' and re.search("pdf", url_disp, flags=re.I): + return "pdf" + elif ct == '' and re.search("mobi", url_disp, flags=re.I): + return "mobi" + + return "other" + +def requote(url): + # fallback for non-ascii, non-utf8 bytes in redirect location + (scheme, netloc, path, query, fragment) = urlsplit(url) + try: + newpath = quote(unquote(path), encoding='latin1') + except UnicodeEncodeError as uee: + return '' + return urlunsplit((scheme, netloc, newpath, query, fragment)) + +class ContentTyper(object): + """ """ + def __init__(self): + self.last_call = dict() + + def content_type(self, url): + def handle_ude(url, ude): + url = requote(url) + try: + r = requests.get(url, allow_redirects=True) + except: + logger.error('Error processing %s after unicode error', url) + return '', '' + try: + try: + r = requests.head(url, allow_redirects=True) + if r.status_code == 405: + try: + r = requests.get(url) + except UnicodeDecodeError as ude: + if 'utf-8' in str(ude): + return handle_ude(url, ude) + except UnicodeDecodeError as ude: + if 'utf-8' in str(ude): + return handle_ude(url, ude) + except requests.exceptions.SSLError: + try: + r = requests.get(url, verify=False) + except: + logger.error('Error processing %s verification off', url) + return '', '' + except: + logger.error('Error processing %s', url) + return '', '' + if r.status_code == 404: + logger.error('File not found (404) for %s', url) + return '404', '' + return r.headers.get('content-type', ''), r.headers.get('content-disposition', '') + + def calc_type(self, url): + logger.info(url) + # is there a delay associated with the url + netloc = urlparse(url).netloc + delay = 0.1 if 'oapen.org' in netloc else 1 + + # wait if necessary + last_call = self.last_call.get(netloc) + if last_call is not None: + now = time.time() + min_time_next_call = last_call + delay + if min_time_next_call > now: + time.sleep(min_time_next_call-now) + + 
self.last_call[netloc] = time.time() + + # compute the content-type + return self.content_type(url) + +contenttyper = ContentTyper() + +def load_ebookfile(url, format, user_agent=settings.USER_AGENT, method='GET', verify=True): + ''' + return a ContentFile, format if a new ebook has been loaded + ''' + ebfs = models.EbookFile.objects.filter(source=url) + if ebfs: + return None, '' + try: + if method == 'POST': + response = requests.post(url, headers={"User-Agent": user_agent}, verify=verify) + else: + response = requests.get(url, headers={"User-Agent": user_agent}, verify=verify) + + except requests.exceptions.SSLError: + logger.error('bad certificate? for %s', url) + return None, '' + except IOError as e: + logger.error('could not open %s', url) + return None, '' + except UnicodeDecodeError as e: + logger.error('decoding error for %s', url) + url = requote(url) + try: + response = requests.get(url, headers={"User-Agent": user_agent}, verify=verify) + except: + return None, '' + + if response.status_code == 200: + logger.debug(response.headers.get('content-type', '')) + resp_format = type_for_url(url, + content_type=response.headers.get('content-type', ''), + disposition=response.headers.get('content-disposition', '')) + if resp_format == 'online' or (format != 'online' and resp_format != format): + logger.warning('response format %s for %s is not correct', resp_format, url) + return None, resp_format + else: + logger.warning('couldn\'t get %s', url) + return None, '' + + contentfile = ContentFile(response.content) + try: + test_file(contentfile, resp_format) + return contentfile, resp_format + except ValidationError as e: + logger.error('downloaded %s was not a valid %s', url, format) + None, resp_format + + diff --git a/core/models/rh_models.py b/core/models/rh_models.py index 72ca471b1..a725941a7 100644 --- a/core/models/rh_models.py +++ b/core/models/rh_models.py @@ -13,9 +13,9 @@ class Claim(models.Model): (u'release', u'Claim has not been accepted.'), ) 
created = models.DateTimeField(auto_now_add=True) - rights_holder = models.ForeignKey("RightsHolder", related_name="claim", null=False) - work = models.ForeignKey("Work", related_name="claim", null=False) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="claim", null=False) + rights_holder = models.ForeignKey("RightsHolder", on_delete=models.CASCADE, related_name="claim", null=False) + work = models.ForeignKey("Work", on_delete=models.CASCADE, related_name="claim", null=False) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="claim", null=False) status = models.CharField(max_length=7, choices=STATUSES, default='active') @property @@ -33,7 +33,7 @@ def can_open_new(self): return 2 # can open a THANKS campaign return 1 # can open any type of campaign - def __unicode__(self): + def __str__(self): return self.work.title @property @@ -66,7 +66,7 @@ class RightsHolder(models.Model): created = models.DateTimeField(auto_now_add=True) email = models.CharField(max_length=100, blank=False, default='') rights_holder_name = models.CharField(max_length=100, blank=False) - owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="rights_holder", null=False) + owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="rights_holder", null=False) approved = models.BooleanField(default=False) address = models.CharField(max_length=400, blank=False, default='') mailing = models.CharField(max_length=400, blank=False, default='') @@ -76,7 +76,7 @@ class RightsHolder(models.Model): signer_title = models.CharField(max_length=30, blank=False, default='') signature = models.CharField(max_length=100, blank=False, default='' ) - def __unicode__(self): + def __str__(self): return self.rights_holder_name def notify_rh(sender, created, instance, **kwargs): diff --git a/core/parameters.py b/core/parameters.py index a7fffae87..4c410e8ad 100644 --- a/core/parameters.py +++ b/core/parameters.py @@ 
-1,8 +1,10 @@ (REWARDS, BUY2UNGLUE, THANKS) = (1, 2, 3) (INDIVIDUAL, LIBRARY, BORROWED, RESERVE, THANKED) = (1, 2, 3, 4, 5) TESTING = 0 -OFFER_CHOICES = ((INDIVIDUAL,'Individual license'),(LIBRARY,'Library License')) -ACQ_CHOICES = ((INDIVIDUAL,'Individual license'),(LIBRARY,'Library License'),(BORROWED,'Borrowed from Library'), (TESTING,'Just for Testing'), (RESERVE,'On Reserve'),(THANKED,'Already Thanked'),) +OFFER_CHOICES = ((INDIVIDUAL, 'Individual license'),(LIBRARY, 'Library License')) +ACQ_CHOICES = ((INDIVIDUAL, 'Individual license'), (LIBRARY, 'Library License'), + (BORROWED, 'Borrowed from Library'), (TESTING, 'Just for Testing'), + (RESERVE, 'On Reserve'), (THANKED, 'Already Thanked'),) AGE_LEVEL_CHOICES = ( ('', 'No Rating'), @@ -16,25 +18,28 @@ ('18-', 'Adult/Advanced Reader') ) +DOWNLOADABLE = ('pdf', 'epub', 'mobi') + TEXT_RELATION_CHOICES = ( ('translation', 'translation'), ('revision', 'revision'), ('sequel', 'sequel'), - ('part', 'part') + ('part', 'part'), + ('unspecified', 'unspecified') ) ID_CHOICES = ( - ('http', 'Web Address'), + ('http', 'Web Address'), ('isbn', 'ISBN'), - ('doab', 'DOABooks ID'), + ('doab', 'DOABooks handle'), ('gtbg', 'Project Gutenberg Number'), ('doi', 'Digital Object Identifier'), ('oclc', 'OCLC Number'), ('goog', 'Google Books ID'), - ('gdrd', 'Goodreads ID'), ('thng', 'Library Thing ID'), ('olwk', 'Open Library Work ID'), ('ltwk', 'Library Thing Work ID'), + ('oapn', 'OAPEN ID'), ) OTHER_ID_CHOICES = ( @@ -42,12 +47,70 @@ ('edid', 'pragmatic edition ID'), ) -WORK_IDENTIFIERS = ('doi','olwk','glue','ltwk', 'http', 'doab') +WORK_IDENTIFIERS = ('doi', 'olwk', 'glue', 'ltwk', 'http', 'doab') ID_CHOICES_MAP = dict(ID_CHOICES) +GOOD_PROVIDERS = ('Internet Archive', 'Unglue.it', 'Github', 'OAPEN Library', 'SciELO') +DOMAIN_TO_PROVIDER = dict([ + [u'adelaide.edu.au', u'University of Adelaide'], + [u'aliprandi.org', u'Simone Aliprandi'], + [u'antilia.to.it', u'antilia.to.it'], + [u'antropologie.zcu.cz', u'AntropoWeb'], + 
[u'aupress.ca', u'Athabasca University Press'], + [u'bloomsburyacademic.com', u'Bloomsbury Academic'], + [u'books.mdpi.com', u'MDPI Books'], + [u'books.openedition.org', u'OpenEdition Books'], + [u'books.scielo.org', u'SciELO'], + [u'ccdigitalpress.org', u'Computers and Composition Digital Press'], + [u'co-action.net', u'Co-Action Publishing'], + [u'degruyter.com', u'De Gruyter Online'], + [u'digitalcommons.usu.edu', u'DigitalCommons, Utah State University'], + [u'dl.dropboxusercontent.com', u'Dropbox'], + [u'doabooks.org', u'Directory of Open Access Books'], + [u'doi.org', u'DOI Resolver'], + [u'dropbox.com', u'Dropbox'], + [u'dspace.ucalgary.ca', u'Institutional Repository at the University of Calgary'], + [u'dx.doi.org', u'DOI Resolver'], + [u'ebooks.iospress.nl', u'IOS Press Ebooks'], + [u'hdl.handle.net', u'Handle Proxy'], + [u'hw.oeaw.ac.at', u'Austrian Academy of Sciences'], + [u'img.mdpi.org', u'MDPI Books'], + [u'ledibooks.com', u'LediBooks'], + [u'ledizioni.it', u'Ledizioni'], + [u'leo.cilea.it', u'LEO '], + [u'leo.cineca.it', u'Letteratura Elettronica Online'], + [u'library.oapen.org', u'OAPEN Library'], + [u'link.springer.com', u'Springer'], + [u'maestrantonella.it', u'maestrantonella.it'], + [u'oapen.org', u'OAPEN Library'], + [u'openbookpublishers.com', u'Open Book Publishers'], + [u'palgraveconnect.com', u'Palgrave Connect'], + [u'press.openedition.org', u'OpenEdition Press'], + [u'scribd.com', u'Scribd'], + [u'springerlink.com', u'Springer'], + [u'transcript-verlag.de', u'Transcript-Verlag'], + [u'ubiquitypress.com', u'Ubiquity Press'], + [u'unglueit-files.s3.amazonaws.com', u'Unglue.it'], + [u'unimib.it', u'University of Milano-Bicocca'], + [u'unito.it', u"University of Turin"], + [u'windsor.scholarsportal.info', u'Scholars Portal'], +]) +ORDER_BY_KEYS = { + 'newest':['-featured', '-created'], + 'oldest':['created'], + 'featured':['-featured', '-num_wishes'], + 'popular':['-num_wishes'], + 'title':['title'], + 'none':[], #no ordering +} +MAX_FACETS 
= 2 +DONATION_CHOICES = ( + ('general', 'The FEF General Fund'), + ('monographs', 'The FEF Open Access Monographs Fund'), +) diff --git a/core/pdf.py b/core/pdf.py index 0931e46b6..b43430708 100644 --- a/core/pdf.py +++ b/core/pdf.py @@ -2,34 +2,36 @@ Utilities that manipulate pdf files """ import logging +from io import BytesIO, StringIO +from tempfile import NamedTemporaryFile + import requests from xhtml2pdf import pisa # import python module -from PyPDF2 import PdfFileMerger,PdfFileReader -from StringIO import StringIO -from tempfile import NamedTemporaryFile +from pypdf import PdfWriter, PdfReader +from pypdf.errors import PdfReadError +from pypdf import PageRange from django.template.loader import render_to_string -from regluit import settings +from django.conf import settings logger = logging.getLogger(__name__) # Utility function def ask_pdf(context={}): - ask_html = StringIO(unicode(render_to_string('pdf/ask.html', context))) + ask_html = StringIO(str(render_to_string('pdf/ask.html', context))) # open output file for writing (truncated binary) - resultFile = StringIO() + resultFile = BytesIO() # convert HTML to PDF pisaStatus = pisa.CreatePDF( - src=ask_html, # the HTML to convert - dest=resultFile) # file to recieve result - + src=ask_html, # the HTML to convert + dest=resultFile, # file to recieve result + ) # True on success and False on errors assert pisaStatus.err == 0 return resultFile -def pdf_append( file1, file2, file_out ): - merger = PdfFileMerger(strict=False) - merger.append(file1) +def pdf_append(file1, file2, file_out): + merger = PdfWriter(file1) merger.append(file2) merger.write(file_out) merger.close() @@ -37,7 +39,7 @@ def pdf_append( file1, file2, file_out ): def test_pdf(pdf_file): temp = None try: - if isinstance(pdf_file , (str, unicode)): + if isinstance(pdf_file, str): if pdf_file.startswith('http:') or pdf_file.startswith('https:'): temp = NamedTemporaryFile(delete=False) test_file_content = requests.get(pdf_file).content @@ -50,7 
+52,7 @@ def test_pdf(pdf_file): pdf_file.seek(0) temp = pdf_file try: - PdfFileReader(temp) + PdfReader(temp) success = True except: success = False @@ -60,7 +62,36 @@ def test_pdf(pdf_file): logger.exception('error testing a pdf: %s' % pdf_file[:100]) return False -def test_test_pdf(self): +def staple_pdf(urllist, user_agent=settings.USER_AGENT, strip_covers=0): + pages = None + all_but_cover = PageRange('%s:' % int(strip_covers)) + merger = PdfWriter(None) + s = requests.Session() + for url in urllist: + try: + response = s.get(url, headers={"User-Agent": user_agent}) + except requests.exceptions.ConnectionError: + logger.error("Error getting url: %s", url) + return None + if response.status_code == 200: + try: + logger.debug('adding %s bytes from %s', len(response.content), url) + merger.append(BytesIO(response.content), pages=pages) + except PdfReadError: + logger.error("error reading pdf url: %s", url) + return None + else: + return None + pages = all_but_cover if strip_covers else pages + out = BytesIO() + try: + merger.write(out) + except (PdfReadError, RecursionError): + logger.error("error writing pdf url: %s", url) + return None + return out + +def test_test_pdf(): assert(test_pdf(settings.TEST_PDF_URL)) temp = NamedTemporaryFile(delete=False) test_file_content = requests.get(settings.TEST_PDF_URL).content diff --git a/core/search.py b/core/search.py index a2fd24b7a..00820baee 100644 --- a/core/search.py +++ b/core/search.py @@ -1,52 +1,58 @@ import re import json -import requests -import regluit.core.isbn +import requests from django.conf import settings +from regluit.core.covers import DEFAULT_COVER +import regluit.core.isbn + def gluejar_search(q, user_ip='69.243.24.29', page=1): """normalizes results from the google books search suitable for gluejar """ results = [] - search_result=googlebooks_search(q, user_ip, page) + search_result = googlebooks_search(q, user_ip, page) if 'items' in search_result.keys(): for item in search_result['items']: v = 
item['volumeInfo'] - r = {'title': v.get('title', ""), + r = {'title': v.get('title', ""), 'description': v.get('description', ""), 'publisher': v.get('publisher', ""), 'googlebooks_id': item.get('id')} - + # TODO: allow multiple authors - if v.has_key('authors') and len(v['authors']) == 1 : + if 'authors' in v and len(v['authors']) == 1: r['author'] = r['authors_short'] = v['authors'][0] - elif v.has_key('authors') and len(v['authors']) > 2: + elif 'authors' in v and len(v['authors']) > 2: r['author'] = v['authors'][0] - r['authors_short'] = '%s et al.' % v['authors'][0] - elif v.has_key('authors') and len(v['authors']) == 2: + r['authors_short'] = '%s et al.' % v['authors'][0] + elif 'authors' in v and len(v['authors']) == 2: r['author'] = v['authors'][0] - r['authors_short'] = '%s and %s' % (v['authors'][0], v['authors'][1]) + r['authors_short'] = '%s and %s' % (v['authors'][0], v['authors'][1]) else: r['author'] = "" r['isbn_13'] = None - + # pull out isbns for i in v.get('industryIdentifiers', []): if i['type'] == 'ISBN_13': r['isbn_13'] = i['identifier'] elif i['type'] == 'ISBN_10': - if not r['isbn_13'] : + if not r['isbn_13']: r['isbn_13'] = regluit.core.isbn.convert_10_to_13(i['identifier']) - + # cover image - if v.has_key('imageLinks'): + if 'imageLinks' in v: url = v['imageLinks'].get('thumbnail', "") - url = re.sub(r'http://(bks[0-9]+\.)?books\.google\.com', 'https://encrypted.google.com', url) + url = re.sub( + r'http://(bks[0-9]+\.)?books\.google\.com', + 'https://encrypted.google.com', + url, + ) r['cover_image_thumbnail'] = url else: - r['cover_image_thumbnail'] = "/static/images/generic_cover_larger.png" - + r['cover_image_thumbnail'] = DEFAULT_COVER + access_info = item.get('accessInfo') if access_info: epub = access_info.get('epub') @@ -56,20 +62,23 @@ def gluejar_search(q, user_ip='69.243.24.29', page=1): if pdf and pdf.get('downloadLink'): r['first_pdf_url'] = pdf['downloadLink'] results.append(r) - return results + return results def 
googlebooks_search(q, user_ip, page): if len(q) < 2 or len(q) > 2000: return {} - # XXX: need to pass IP address of user in from the frontend + # XXX: need to pass IP address of user in from the frontend headers = {'X-Forwarded-For': user_ip} - start = (page - 1) * 10 + start = (page - 1) * 10 params = {'q': q, 'startIndex': start, 'maxResults': 10} if hasattr(settings, 'GOOGLE_BOOKS_API_KEY'): params['key'] = settings.GOOGLE_BOOKS_API_KEY - - r = requests.get('https://www.googleapis.com/books/v1/volumes', - params=params, headers=headers) + + r = requests.get( + 'https://www.googleapis.com/books/v1/volumes', + params=params, + headers=headers + ) # urls like https://www.googleapis.com/books/v1/volumes?q=invisible+engines&startIndex=0&maxResults=10&key=[key] return json.loads(r.content) diff --git a/core/signals.py b/core/signals.py index 96f94cf18..0a9c2a46f 100644 --- a/core/signals.py +++ b/core/signals.py @@ -25,6 +25,7 @@ from django.utils.timezone import now from notification import models as notification +from registration.signals import user_activated """ regluit imports @@ -48,7 +49,8 @@ def create_user_objects(sender, created, instance, **kwargs): if created: Wishlist.objects.create(user=instance) profile = UserProfile.objects.create(user=instance) - profile.ml_subscribe() + if instance.social_auth.exists(): + instance.profile.ml_subscribe() except DatabaseError: # this can happen when creating superuser during syncdb since the # core_wishlist table doesn't exist yet @@ -200,7 +202,7 @@ def handle_transaction_charged(sender,transaction=None, **kwargs): transaction.campaign.update_left() notification.send([transaction.user], "purchase_complete", context, True) from regluit.core.tasks import watermark_acq - watermark_acq.delay(new_acq) + watermark_acq.delay(new_acq.id) if transaction.campaign.cc_date < date_today() : transaction.campaign.update_status(send_notice=True) elif transaction.campaign.type is THANKS: @@ -300,7 +302,7 @@ def 
handle_wishlist_added(supporter, work, **kwargs): from regluit.core.tasks import emit_notifications emit_notifications.delay() - + wishlist_added.connect(handle_wishlist_added) deadline_impending = Signal(providing_args=["campaign"]) @@ -349,4 +351,11 @@ def notify_join_library(sender, created, instance, **kwargs): 'user': instance.user, }) -post_save.connect(notify_join_library, sender=LibraryUser) \ No newline at end of file +post_save.connect(notify_join_library, sender=LibraryUser) + +from registration.signals import user_activated + +def ml_subscribe(user, request, **kwargs): + user.profile.ml_subscribe() + +user_activated.connect(ml_subscribe) \ No newline at end of file diff --git a/core/sitemaps.py b/core/sitemaps.py index 0b29e44a1..605645a64 100644 --- a/core/sitemaps.py +++ b/core/sitemaps.py @@ -1,5 +1,5 @@ from django.contrib.sitemaps import Sitemap -from django.core.urlresolvers import reverse +from django.urls import reverse from regluit.core.models import Work, Edition class WorkSitemap(Sitemap): @@ -7,17 +7,7 @@ class WorkSitemap(Sitemap): limit = 10000 def items(self): - return Work.objects.all() + return Work.objects.filter(is_free=True) def priority(self,work): return '{:.1f}'.format(work.priority()/5.0) - -class PublisherSitemap(Sitemap): - priority = 0.2 - protocol = 'https' - - def items(self): - return Edition.objects.exclude(publisher_name__isnull=True).order_by('publisher_name__name').values('publisher_name').distinct() - - def location(self, pub): - return reverse("bypubname_list",args=[pub['publisher_name']]) diff --git a/core/tasks.py b/core/tasks.py index 36dc8f2bd..f5fe2defc 100644 --- a/core/tasks.py +++ b/core/tasks.py @@ -1,37 +1,37 @@ -""" -external library imports -""" +# +# external library imports +# import logging +import random -from celery.task import task from datetime import timedelta from time import sleep +from celery.task import task -""" -django imports -""" +# +# django imports +# from django.conf import settings from 
django.contrib.auth.models import User from django.core.mail import send_mail +from django.core.management import call_command from django.utils.timezone import now from notification.engine import send_all from notification import models as notification from mailchimp3 import MailChimp -from mailchimp3.mailchimpclient import MailChimpError -""" -regluit imports -""" +# +# regluit imports +# from regluit.core import ( bookloader, + covers, models, - goodreads, - librarything, - mobigen + librarything ) -from regluit.core.models import Campaign, Acq, Gift +from regluit.core.models import Acq, Campaign, EbookFile, Gift, UserProfile, Work from regluit.core.signals import deadline_impending from regluit.core.parameters import RESERVE, REWARDS, THANKS from regluit.utils.localdatetime import date_today @@ -39,39 +39,33 @@ logger = logging.getLogger(__name__) mc_client = MailChimp(mc_api=settings.MAILCHIMP_API_KEY) -@task +@task def populate_edition(isbn): """given an edition this task will populate the database with additional information about related editions and subjects related to this edition """ bookloader.add_related(isbn) - edition=models.Edition.get_by_isbn(isbn) + edition = models.Edition.get_by_isbn(isbn) if edition: bookloader.add_openlibrary(edition.work) return edition -@task -def load_goodreads_shelf_into_wishlist(user_id, shelf_name='all', goodreads_user_id=None, max_books=None, - expected_number_of_books=None): - user=User.objects.get(id=user_id) - return goodreads.load_goodreads_shelf_into_wishlist(user,shelf_name,goodreads_user_id,max_books, expected_number_of_books) - @task def load_librarything_into_wishlist(user_id, lt_username, max_books=None): - user=User.objects.get(id=user_id) + user = User.objects.get(id=user_id) return librarything.load_librarything_into_wishlist(user, lt_username, max_books) - + @task def fac(n, sleep_interval=None): - # used to test celery task execution - if not(isinstance(n,int) and n >= 0): + # used to test celery task 
execution + if not(isinstance(n, int) and n >= 0): raise Exception("You can't calculate a factorial of %s " % (str(n))) if n <= 1: return 1 else: res = 1 - for i in xrange(2,n+1): - res = res*i + for i in range(2, n+1): + res = res * i fac.update_state(state="PROGRESS", meta={"current": i, "total": n}) if sleep_interval is not None: sleep(sleep_interval) @@ -80,34 +74,39 @@ def fac(n, sleep_interval=None): @task def send_mail_task(subject, message, from_email, recipient_list, - fail_silently=False, auth_user=None, auth_password=None, - connection=None, override_from_email=True): + fail_silently=False, auth_user=None, auth_password=None, + connection=None, override_from_email=True): """a task to drop django.core.mail.send_mail into """ - # NOTE: since we are currently using Amazon SES, which allows email to be sent only from validated email - # addresses, we force from_email to be one of the validated address unless override_from_email is FALSE + # NOTE: since we are currently using Amazon SES, which allows email to be sent only from + # validated email addresses, we force from_email to be one of the validated + # address unless override_from_email is FALSE try: if override_from_email: try: from_email = settings.DEFAULT_FROM_EMAIL except: pass - r= send_mail(subject, message, from_email, recipient_list, fail_silently=False, auth_user=auth_user, - auth_password=auth_password, connection=connection) + r = send_mail(subject, message, from_email, recipient_list, fail_silently=False, + auth_user=auth_user, auth_password=auth_password, connection=connection) + logger.info('sent mail about %s to %s', subject, recipient_list) except: - r=logger.info('failed to send message:' + message) + r = logger.info('failed to send message:', message) return r - + #task to update the status of active campaigns @task def update_active_campaign_status(): """update the status of all active campaigns -- presumed to be run at midnight Eastern time""" - return 
[c.update_status(send_notice=True, ignore_deadline_for_success=True, process_transactions=True) for c in Campaign.objects.filter(status='Active') ] + return [ + c.update_status(send_notice=True, ignore_deadline_for_success=True, + process_transactions=True) for c in Campaign.objects.filter(status='Active') + ] @task def emit_notifications(): - logger.info('notifications emitting' ) - return send_all() - + logger.info('notifications emitting') + send_all() + @task def report_new_ebooks(created=None): #created= creation date if created: @@ -126,51 +125,55 @@ def report_new_ebooks(created=None): #created= creation date True ) break - + @task def notify_ending_soon(): c_active = Campaign.objects.filter(status='Active', type=REWARDS) for c in c_active: if c.deadline - now() < timedelta(7) and c.deadline - now() >= timedelta(6): - """ - if the campaign is still active and there's only a week left until it closes, send reminder notification - """ + # if the campaign is still active and there's only a week left until it closes, + # send reminder notification deadline_impending.send(sender=None, campaign=c) @task -def watermark_acq(acq): +def watermark_acq(acq_id): + try: + acq = Acq.objects.get(acq_id) + except Acq.DoesNotExist as e: + logger.error("error getting acq %s", acq_id) + return False acq.get_watermarked() - + @task -def process_ebfs(campaign): +def process_ebfs(campaign_id): + try: + campaign = Campaign.objects.get(campaign_id) + except Campaign.DoesNotExist as e: + logger.error("error getting acq %s", campaign_id) + return False if campaign.type == THANKS: if campaign.use_add_ask: campaign.add_ask_to_ebfs() else: campaign.revert_asks() - campaign.make_mobis() - -@task -def make_mobi(ebookfile): - return ebookfile.make_mobi() - + @task def refresh_acqs(): in_10_min = now() + timedelta(minutes=10) acqs = Acq.objects.filter(refreshed=False, refreshes__lt=in_10_min) - logger.info('refreshing %s acqs' % acqs.count()) + logger.info('refreshing %s acqs', acqs.count()) 
for acq in acqs: for hold in acq.holds: # create a 1 day reserve on the acq - reserve_acq = Acq.objects.create( - user = hold.user, - work = hold.work, - license = RESERVE, - lib_acq = acq, - ) + reserve_acq = Acq.objects.create( + user=hold.user, + work=hold.work, + license=RESERVE, + lib_acq=acq, + ) # the post_save handler takes care of pushing expires vis acq.expires_in - + # notify the user with the hold if 'example.org' not in reserve_acq.user.email: notification.send_now([reserve_acq.user], "library_reserve", {'acq':reserve_acq}) @@ -181,15 +184,13 @@ def refresh_acqs(): acq.refreshed = True @task -def convert_to_mobi(input_url, input_format="application/epub+zip"): - return mobigen.convert_to_mobi(input_url, input_format) - -@task -def generate_mobi_ebook_for_edition(edition): - return mobigen.generate_mobi_ebook_for_edition(edition) +def ml_subscribe_task(profile_id, **kwargs): + try: + profile = UserProfile.objects.get(profile_id) + except UserProfile.DoesNotExist as e: + logger.error("error getting profile %s", profile_id) + return False -@task -def ml_subscribe_task(profile, **kwargs): try: if not profile.on_ml: data = {"email_address": profile.user.email, "status_if_new": "pending"} @@ -199,20 +200,36 @@ def ml_subscribe_task(profile, **kwargs): data=data, ) return True - except Exception, e: - logger.error("error subscribing to mailchimp list %s" % (e)) + except Exception as e: + logger.error("error subscribing to mailchimp list %s", e) return False @task def notify_unclaimed_gifts(): unclaimed = Gift.objects.filter(used=None) for gift in unclaimed: - """ - send notice every 7 days, but stop at 10x - """ - unclaimed_duration = (now() - gift.acq.created ).days + # send notice every 7 days, but stop at 10x + unclaimed_duration = (now() - gift.acq.created).days if unclaimed_duration > 70: return - if unclaimed_duration > 0 and unclaimed_duration % 7 == 0 : # first notice in 7 days + if unclaimed_duration > 0 and unclaimed_duration % 7 == 0: # first 
notice in 7 days notification.send_now([gift.acq.user], "purchase_gift_waiting", {'gift':gift}, True) notification.send_now([gift.giver], "purchase_notgot_gift", {'gift':gift}, True) + +@task +def periodic_cleanup(): + call_command('clearsessions') + call_command('cleanupregistration') + +@task +def feature_new_work(): + works = Work.objects.filter(is_free=True, featured__isnull=True).order_by('-num_wishes') + work = works[random.randrange(0, 50)] + work.featured = now() + work.save() + +@task +def make_cover_thumbnail(url, geom_string, **options): + success = covers.make_cover_thumbnail(url, geom_string, **options) + logger.error('bad cover image %s: %s', url) + \ No newline at end of file diff --git a/core/tests.py b/core/tests.py index 5c5050bd7..9e34a77c0 100755 --- a/core/tests.py +++ b/core/tests.py @@ -6,15 +6,17 @@ from decimal import Decimal as D from math import factorial import unittest -from urlparse import parse_qs, urlparse +from urllib.parse import urlparse, parse_qs from tempfile import NamedTemporaryFile from time import sleep, mktime -from celery.task.sets import TaskSet +from celery import group import requests import requests_mock +from pyepub import EPUB #django imports +from django.apps import apps from django.conf import settings from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType @@ -30,26 +32,29 @@ from django_comments.models import Comment -#regluit imports +from regluit.payment.models import Transaction +from regluit.payment.parameters import PAYMENT_TYPE_AUTHORIZATION +from regluit.utils.localdatetime import date_today -from regluit.core import ( - isbn, +from . 
import ( bookloader, + covers, + isbn, + librarything, models, + parameters, search, - goodreads, - librarything, tasks, - parameters, ) -from regluit.core.models import ( +from .epub import test_epub +from .loaders.utils import (load_from_books, loaded_book_ok, ) +from .models import ( Campaign, Work, UnglueitError, Edition, RightsHolder, Claim, - Key, Ebook, Premium, Subject, @@ -58,18 +63,11 @@ EbookFile, Acq, Hold, + safe_get_work, ) -from regluit.libraryauth.models import Library -from regluit.core.parameters import TESTING, LIBRARY, RESERVE -from regluit.core.loaders.utils import (load_from_books, loaded_book_ok, ) -from regluit.core.validation import valid_subject -from regluit.frontend.views import safe_get_work -from regluit.payment.models import Transaction -from regluit.payment.parameters import PAYMENT_TYPE_AUTHORIZATION -from regluit.pyepub import EPUB -from regluit.utils.localdatetime import date_today -from .epub import test_epub +from .parameters import TESTING, LIBRARY, RESERVE from .pdf import test_pdf +from .validation import valid_subject TESTDIR = os.path.join(os.path.dirname(__file__), '../test/') YAML_VERSIONFILE = os.path.join(TESTDIR, 'versiontest.yaml') @@ -111,7 +109,7 @@ def test_add_by_yaml(self): def test_add_by_isbn_mock(self): with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'gb_hamilton.json')) as gb: + with open(os.path.join(TESTDIR, 'gb_hamilton.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) self.test_add_by_isbn(mocking=True) @@ -120,22 +118,22 @@ def test_add_by_isbn(self, mocking=False): if not (mocking or settings.TEST_INTEGRATION): return # edition - edition = bookloader.add_by_isbn('9780143034759') + edition = bookloader.add_by_isbn('9781101200858') self.assertEqual(edition.title, u'Alexander Hamilton') - self.assertEqual(edition.publication_date, u'2005') + self.assertTrue('2005' in edition.publication_date) self.assertEqual(edition.publisher, 
u'Penguin') - self.assertEqual(edition.isbn_10, '0143034758') - self.assertEqual(edition.isbn_13, '9780143034759') - self.assertEqual(edition.googlebooks_id, '4iafgTEhU3QC') + self.assertEqual(edition.isbn_10, '1101200855') + self.assertEqual(edition.isbn_13, '9781101200858') + self.assertTrue(edition.googlebooks_id in ('4z5eL5SGjEoC', '4iafgTEhU3QC')) # authors self.assertEqual(edition.authors.all().count(), 1) - self.assertEqual(edition.authors.all()[0].name, u'Ron Chernow') + self.assertEqual(edition.authors.first().name, u'Ron Chernow') # work self.assertTrue(edition.work) - self.assertEqual(edition.work.googlebooks_id, '4iafgTEhU3QC') - self.assertEqual(edition.work.first_isbn_13(), '9780143034759') + self.assertTrue(edition.googlebooks_id in ('4z5eL5SGjEoC', '4iafgTEhU3QC')) + self.assertEqual(edition.work.first_isbn_13(), '9781101200858') # test duplicate pubname ed2 = Edition.objects.create(work=edition.work) @@ -155,19 +153,19 @@ def test_add_by_isbn(self, mocking=False): def test_language_locale_mock(self): with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'zhCN.json')) as gb: + with open(os.path.join(TESTDIR, 'gb_zhCN.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) self.test_language_locale(mocking=True) def test_language_locale(self, mocking=False): if not (mocking or settings.TEST_INTEGRATION): return - edition = bookloader.add_by_isbn('9787104030126') + edition = bookloader.add_by_isbn('9787115401519') self.assertEqual(edition.work.language, u'zh-CN') def test_update_edition_mock(self): with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'python4da.json')) as gb: + with open(os.path.join(TESTDIR, 'python4da.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) self.test_update_edition(mocking=True) @@ -198,7 +196,7 @@ def test_missing_isbn(self): def test_thingisbn_mock(self): with 
requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, '9780441569595.xml')) as lt: + with open(os.path.join(TESTDIR, '9780441569595.xml'), 'rb') as lt: m.get('https://www.librarything.com/api/thingISBN/0441007465', content=lt.read()) self.test_thingisbn(mocking=True) @@ -220,7 +218,7 @@ def test_add_related(self): langbefore = models.Work.objects.filter(language=lang).count() # ask for related editions to be added using the work we just created with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, '9780441569595.xml')) as lt: + with open(os.path.join(TESTDIR, '9780441569595.xml'), 'rb') as lt: m.get('https://www.librarything.com/api/thingISBN/0441007465', content=lt.read()) bookloader.add_related('0441007465') # should join the editions self.assertTrue(models.Edition.objects.count() >= edbefore) @@ -241,7 +239,7 @@ def test_add_related(self): def test_populate_edition(self): edition = bookloader.add_by_isbn('9780606301121') # A People's History Of The United States with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, '9780061989834.xml')) as lt: + with open(os.path.join(TESTDIR, '9780061989834.xml'), 'rb') as lt: m.get('https://www.librarything.com/api/thingISBN/9780606301121', content=lt.read()) edition = tasks.populate_edition.run(edition.isbn_13) self.assertTrue(edition.work.editions.all().count() > 10) @@ -291,6 +289,8 @@ def test_merge_works_mechanics(self): self.assertTrue(w2.is_free) self.assertFalse(w1.is_free) + sub2 = Subject.objects.get(pk=sub2.pk) + self.assertEqual(sub2.num_free, 1) w1_id = w1.id w2_id = w2.id @@ -310,7 +310,8 @@ def test_merge_works_mechanics(self): self.assertEqual(models.Work.objects.count(), before + 1) self.assertEqual(models.WasWork.objects.count(), wasbefore + 1) self.assertEqual(w1.subjects.count(), 2) - + sub1 = Subject.objects.get(pk=sub1.pk) + self.assertEqual(sub2.num_free, 1) self.assertTrue(w1.is_free) # getting proper view? 
@@ -372,7 +373,7 @@ def test_merge_works(self): c2.save() self.assertEqual(c2.pk, e2.work.last_campaign().pk) # comment on the works - site = Site.objects.all()[0] + site = Site.objects.first() wct = ContentType.objects.get_for_model(models.Work) comment1 = Comment( content_type=wct, @@ -423,14 +424,14 @@ def test_merge_works(self): def test_ebook(self): with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'gb_latinlanguage.json')) as gb: + with open(os.path.join(TESTDIR, 'gb_latinlanguage.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) edition = bookloader.add_by_oclc('1246014') # we've seen the public domain status of this book fluctuate -- and the OCLC # number can disappear. So if the ebook count is 2 then test #if edition is not None and edition.ebooks.count() == 2: self.assertEqual(edition.ebooks.count(), 2) - #ebook_epub = edition.ebooks.all()[0] + #ebook_epub = edition.ebooks.first() ebook_epub = edition.ebooks.filter(format='epub')[0] self.assertEqual(ebook_epub.format, 'epub') self.assertEqual(parse_qs(urlparse(ebook_epub.url).query).get("id"), ['N1RfAAAAMAAJ']) @@ -459,15 +460,15 @@ def test_ebook(self): ebook_pdf.increment() updated_ebook = Ebook.objects.get(pk=ebook_pdf.pk) - self.assertEqual(int(updated_ebook.download_count), 1) - self.assertEqual(int(edition.work.download_count), 1) + #self.assertEqual(int(updated_ebook.download_count), 1) + #self.assertEqual(int(edition.work.download_count), 1) def test_add_no_ebook(self): # this edition lacks an ebook, but we should still be able to load it # http://books.google.com/books?id=D-WjL_HRbNQC&printsec=frontcover#v=onepage&q&f=false # Social Life of Information with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'gb_sociallife.json')) as gb: + with open(os.path.join(TESTDIR, 'gb_sociallife.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) e = 
bookloader.add_by_isbn('1578517087') self.assertTrue(e) @@ -480,12 +481,11 @@ def test_add_openlibrary(self): self.assertTrue(len(subjects) > 10) self.assertTrue('Science fiction' in subjects) self.assertTrue('/works/OL27258W' in work.identifiers.filter(type='olwk').values_list('value', flat=True)) - self.assertTrue('888628' in work.identifiers.filter(type='gdrd').values_list('value', flat=True)) self.assertTrue('609' in work.identifiers.filter(type='ltwk').values_list('value', flat=True)) def test_unicode_openlibrary(self): with requests_mock.Mocker(real_http=True) as m: - with open(os.path.join(TESTDIR, 'gb_fightclub.json')) as gb: + with open(os.path.join(TESTDIR, 'gb_fightclub.json'), 'rb') as gb: m.get('https://www.googleapis.com/books/v1/volumes', content=gb.read()) work = bookloader.add_by_isbn('9783894808358').work #fight club bookloader.add_openlibrary(work) @@ -526,8 +526,8 @@ class SearchTests(TestCase): def test_search_mock(self): with requests_mock.Mocker(real_http=True) as m: with open( - os.path.join(TESTDIR, 'gb_melville.json') - ) as gb, open(os.path.join(TESTDIR, 'gb_melville2.json')) as gb2: + os.path.join(TESTDIR, 'gb_melville.json'), 'rb' + ) as gb, open(os.path.join(TESTDIR, 'gb_melville2.json'), 'rb') as gb2: m.get( 'https://www.googleapis.com/books/v1/volumes', [{'content':gb2.read()}, {'content':gb.read()}] @@ -543,17 +543,17 @@ def test_basic_search(self, mocking=False): self.assertEqual(len(results), 10) r = results[0] - self.assertTrue(r.has_key('title')) - self.assertTrue(r.has_key('author')) - self.assertTrue(r.has_key('description')) - self.assertTrue(r.has_key('cover_image_thumbnail')) + self.assertTrue('title' in r) + self.assertTrue('author' in r) + self.assertTrue('description' in r) + self.assertTrue('cover_image_thumbnail' in r) self.assertTrue( r['cover_image_thumbnail'].startswith('https') or r['cover_image_thumbnail'].startswith('http') ) - self.assertTrue(r.has_key('publisher')) - self.assertTrue(r.has_key('isbn_13')) - 
self.assertTrue(r.has_key('googlebooks_id')) + self.assertTrue('publisher' in r) + self.assertTrue('isbn_13' in r) + self.assertTrue('googlebooks_id' in r) def test_pagination(self, mocking=False): if not (mocking or settings.TEST_INTEGRATION): @@ -682,7 +682,7 @@ def test_campaign_status(self): #cloning (note we changed c3 to w2 to make it clonable) c7 = c3.clone() self.assertEqual(c7.status, 'INITIALIZED') - self.assertEqual(c7.premiums.all()[0].description, 'botsnack') + self.assertEqual(c7.premiums.first().description, 'botsnack') # SUCCESSFUL @@ -774,7 +774,7 @@ def test_add_remove(self): user.wishlist.add_work(work, 'test') self.assertEqual(user.wishlist.works.count(), 1) self.assertEqual(work.num_wishes, num_wishes+1) - self.assertEqual(work.priority(),1) + self.assertEqual(work.priority(), 2) user.wishlist.remove_work(work) self.assertEqual(user.wishlist.works.count(), 0) self.assertEqual(work.num_wishes, num_wishes) @@ -790,43 +790,12 @@ def test_single_fac(self): def test_subtask(self): n = 30 subtasks = [tasks.fac.subtask(args=(x,)) for x in range(n)] - job = TaskSet(tasks=subtasks) + job = group(subtasks) result = job.apply_async() while not result.ready(): sleep(0.2) self.assertEqual(result.join(), [factorial(x) for x in range(n)]) -class GoodreadsTest(TestCase): - - @unittest.skip("Goodreads down at the moment") - def test_goodreads_shelves(self): - if not settings.GOODREADS_API_SECRET: - return - # test to see whether the core undeletable shelves are on the list - gr_uid = "767708" # for Raymond Yee - gc = goodreads.GoodreadsClient( - key=settings.GOODREADS_API_KEY, - secret=settings.GOODREADS_API_SECRET - ) - shelves = gc.shelves_list(gr_uid) - shelf_names = [s['name'] for s in shelves['user_shelves']] - self.assertTrue('currently-reading' in shelf_names) - self.assertTrue('read' in shelf_names) - self.assertTrue('to-read' in shelf_names) - - @unittest.skip("Goodreads down at the moment") - def test_review_list_unauth(self): - if not 
settings.GOODREADS_API_SECRET: - return - gr_uid = "767708" # for Raymond Yee - gc = goodreads.GoodreadsClient( - key=settings.GOODREADS_API_KEY, - secret=settings.GOODREADS_API_SECRET - ) - reviews = gc.review_list_unauth(user_id=gr_uid, shelf='read') - # test to see whether there is a book field in each of the review - # url for test is https://www.goodreads.com/review/list.xml?id=767708&shelf=read&page=1&per_page=20&order=a&v=2&key=[key] - self.assertTrue(all([r.has_key("book") for r in reviews])) class LibraryThingTest(TestCase): @@ -903,24 +872,12 @@ def test_ISBN(self): self.assertEqual(isbn.ISBN(python_13).validate(), python_10) # curious about set membership - self.assertEqual(len(set([isbn.ISBN(milosz_10), isbn.ISBN(milosz_13)])), 2) self.assertEqual(len(set([str(isbn.ISBN(milosz_10)), str(isbn.ISBN(milosz_13))])), 2) self.assertEqual( len(set([isbn.ISBN(milosz_10).to_string(), isbn.ISBN(milosz_13).to_string()])), 1 ) -class EncryptedKeyTest(TestCase): - def test_create_read_key(self): - name = "the great answer" - value = "42" - key = Key.objects.create(name=name, value=value) - key.save() - # do we get back the value? - self.assertEqual(Key.objects.filter(name=name)[0].value, value) - # just checking that the encrypted value is not the same as the value - self.assertNotEqual(key.encrypted_value, value) # is this always true? 
- class SafeGetWorkTest(TestCase): def test_good_work(self): w1 = models.Work() @@ -934,7 +891,7 @@ def test_good_work(self): self.assertEqual(work, w1) work = safe_get_work(w2_id) self.assertEqual(work, w1) - self.assertRaises(Http404, safe_get_work, 3) + self.assertRaises(Work.DoesNotExist, safe_get_work, 3) class WorkTests(TestCase): def setUp(self): @@ -988,23 +945,14 @@ def test_download_page(self): eb1.edition = e1 eb1.format = 'epub' - eb2 = models.Ebook() - eb2.url = "https://example2.com" - eb2.edition = e2 - eb2.format = 'mobi' eb1.save() - eb2.save() anon_client = Client() response = anon_client.get("/work/%s/download/" % w.id, follow=True) - self.assertContains(response, "/download_ebook/%s/"% eb1.id, count=11) - self.assertContains(response, "/download_ebook/%s/"% eb2.id, count=4) + self.assertContains(response, "/download_ebook/%s/"% eb1.id, count=12) self.assertTrue(eb1.edition.work.is_free) eb1.delete() - self.assertTrue(eb2.edition.work.is_free) - eb2.delete() - self.assertFalse(eb2.edition.work.is_free) class MailingListTests(TestCase): #mostly to check that MailChimp account is setp correctly @@ -1018,12 +966,30 @@ def test_mailchimp(self): self.user = User.objects.create_user('chimp_test', 'eric@gluejar.com', 'chimp_test') self.assertTrue(self.user.profile.on_ml) +class CoverTests(TestCase): + test_image = 'https://unglue.it/static/images/logo.png' + test_bad_image = 'https://example.com/static/images/logo.png' + def setUp(self): + self.work = Work.objects.create(title="Cover Work") + self.edition = Edition.objects.create(title=self.work.title, work=self.work) + covers.sorl_get_thumbnail(self.test_image, 'x550', crop='noop', quality=95) + + def test_cached_cover(self): + thumb = covers.get_thumbnail(self.test_image, 'x550', crop='noop', quality=95) + self.assertTrue(thumb.exists()) + self.assertTrue(thumb.width, 550) + + def test_bad_cover(self): + thumb = covers.get_thumbnail(self.test_bad_image, '128', crop='noop', quality=95) + 
self.assertEqual(thumb.url, covers.DEFAULT_COVER) + + @override_settings(LOCAL_TEST=True) class EbookFileTests(TestCase): fixtures = ['initial_data.json'] def test_badepub_errors(self): textfile = NamedTemporaryFile(delete=False) - textfile.write("bad text file") + textfile.write(b"bad text file") textfile.seek(0) self.assertTrue(test_epub(textfile)) @@ -1039,9 +1005,9 @@ def test_ebookfile(self): c = Campaign.objects.create( work=w, type=parameters.BUY2UNGLUE, - cc_date_initial=datetime(2020, 1, 1), + cc_date_initial=datetime(2030, 1, 1), target=1000, - deadline=datetime(2020, 1, 1), + deadline=datetime(2030, 1, 1), license='CC BY', description="dummy description", ) @@ -1054,7 +1020,7 @@ def test_ebookfile(self): try: # now we can try putting the test epub file into Django storage - temp_file = open(temp.name) + temp_file = open(temp.name, 'rb') dj_file = DjangoFile(temp_file) ebf = EbookFile(format='epub', edition=e, file=dj_file) @@ -1069,7 +1035,7 @@ def test_ebookfile(self): self.assertEqual(len(test_epub.opf), 4) self.assertTrue(len(test_epub.opf[2]) < 30) - acq = Acq.objects.create(user=u,work=w,license=TESTING) + acq = Acq.objects.create(user=u, work=w, license=TESTING) self.assertIsNot(acq.nonce, None) url = acq.get_watermarked().download_link_epub @@ -1084,7 +1050,7 @@ def test_ebookfile(self): #flip the campaign to success c.cc_date_initial = datetime(2012, 1, 1) c.update_status() - self.assertEqual(c.work.ebooks().count(), 2) + self.assertEqual(c.work.ebooks().count(), 1) c.do_watermark = False c.save() url = acq.get_watermarked().download_link_epub @@ -1108,7 +1074,7 @@ def test_ebookfile_thanks(self): temp.close() try: # now we can try putting the test pdf file into Django storage - temp_file = open(temp.name) + temp_file = open(temp.name, 'rb') dj_file = DjangoFile(temp_file) ebf = EbookFile(format='pdf', edition=e, file=dj_file) @@ -1139,7 +1105,7 @@ def test_ebookfile_thanks(self): temp.close() try: # now we can try putting the test pdf file into 
Django storage - temp_file = open(temp.name) + temp_file = open(temp.name, 'rb') dj_file = DjangoFile(temp_file) ebf = EbookFile(format='epub', edition=e, file=dj_file) @@ -1149,15 +1115,12 @@ def test_ebookfile_thanks(self): ebf.ebook = eb ebf.save() temp_file.close() - ebf.make_mobi() finally: # make sure we get rid of temp file os.remove(temp.name) #test the ask-appender c.add_ask_to_ebfs() self.assertTrue(c.work.ebookfiles().filter(asking=True, format='epub').count() > 0) - if settings.MOBIGEN_URL: - self.assertTrue(c.work.ebookfiles().filter(asking=True, format='mobi').count() > 0) self.assertTrue(c.work.ebookfiles().filter(asking=True, ebook__active=True).count() > 0) self.assertTrue(c.work.ebookfiles().filter(asking=False, ebook__active=True).count() == 0) #test the unasker @@ -1170,35 +1133,22 @@ def test_bad_ebookfile(self): e = Edition.objects.create(title=w.title, work=w) temp = NamedTemporaryFile(delete=False) - test_file_content = "bad text file" + test_file_content = b"bad text file" temp.write(test_file_content) temp.close() try: # put the bad file into Django storage - temp_file = open(temp.name) + temp_file = open(temp.name, 'rb') dj_file = DjangoFile(temp_file) ebf = EbookFile(format='epub', edition=e, file=dj_file) ebf.save() temp_file.close() - ebf.make_mobi() finally: # make sure we get rid of temp file os.remove(temp.name) - self.assertTrue(ebf.mobied < 0) -class MobigenTests(TestCase): - def test_convert_to_mobi(self): - """ - check the size of the mobi output of a Moby Dick epub - """ - from regluit.core.mobigen import convert_to_mobi - if settings.TEST_INTEGRATION: - output = convert_to_mobi( - "https://github.com/GITenberg/Moby-Dick--Or-The-Whale_2701/releases/download/0.2.0/Moby-Dick-Or-The-Whale.epub" - ) - self.assertTrue(len(output) > 2207877) @override_settings(LOCAL_TEST=True) class LibTests(TestCase): @@ -1211,6 +1161,7 @@ def test_purchase(self): e = Edition.objects.create(title=w.title, work=w) u = User.objects.create_user('test', 
'test@example.org', 'testpass') lu = User.objects.create_user('library', 'testu@example.org', 'testpass') + Library = apps.get_model('libraryauth', 'Library') lib = Library.objects.create(user=lu, owner=u) c = Campaign.objects.create( work=w, @@ -1251,11 +1202,11 @@ def test_ebooks_in_github_release(self): ) expected_set = set([ ('epub', u'Adventures-of-Huckleberry-Finn.epub'), - ('mobi', u'Adventures-of-Huckleberry-Finn.mobi'), ('pdf', u'Adventures-of-Huckleberry-Finn.pdf') ]) - self.assertEqual(set(ebooks), expected_set) + self.assertTrue(('epub', 'Adventures-of-Huckleberry-Finn.epub') in set(ebooks)) + self.assertTrue(('pdf', 'Adventures-of-Huckleberry-Finn.pdf') in set(ebooks)) class OnixLoaderTests(TestCase): fixtures = ['initial_data.json'] diff --git a/core/validation.py b/core/validation.py index aed5a3fdd..26b312c5f 100644 --- a/core/validation.py +++ b/core/validation.py @@ -4,16 +4,19 @@ ''' import re import datetime +import logging from dateutil.parser import parse -from PyPDF2 import PdfFileReader +from pypdf import PdfReader from django.forms import ValidationError from django.utils.translation import ugettext_lazy as _ -from regluit.pyepub import EPUB -from regluit.mobi import Mobi +from pyepub import EPUB from .isbn import ISBN +from regluit.utils.text import remove_author_junk + +logger = logging.getLogger(__name__) ID_VALIDATION = { 'http': (re.compile(r"(https?|ftp)://(-\.)?([^\s/?\.#]+\.?)+(/[^\s]*)?$", @@ -21,8 +24,8 @@ "The Web Address must be a valid http(s) URL."), 'isbn': (u'^([\\dxX \\-–—‐,;]+|delete)$', #includes unicode hyphen, endash and emdash "The ISBN must be a valid ISBN-13."), - 'doab': (r'^(\d{1,6}|delete)$', - "The value must be 1-6 digits."), + 'doab': (r'^20.500.12854/(\d{5,8}|delete)$', + "The value must be a handle, starting with 20.500.12854/, followed by 5-8 digits."), 'gtbg': (r'^(\d{1,6}|delete)$', "The Gutenberg number must be 1-6 digits."), 'doi': (r'^(https?://dx\.doi\.org/|https?://doi\.org/)?(10\.\d+/\S+|delete)$', 
@@ -41,6 +44,8 @@ "The Unglue.it ID must be 1-6 digits."), 'ltwk': (r'^(\d{1,8}|delete)$', "The LibraryThing work ID must be 1-8 digits."), + 'oapn': (r'^(\d{1,8}|delete)$', + "The OAPEN ID must be 1-8 digits."), } def isbn_cleaner(value): @@ -76,10 +81,10 @@ def doi_cleaner(value): } def identifier_cleaner(id_type, quiet=False): - if ID_VALIDATION.has_key(id_type): + if id_type in ID_VALIDATION: (regex, err_msg) = ID_VALIDATION[id_type] extra = ID_MORE_VALIDATION.get(id_type, None) - if isinstance(regex, (str, unicode)): + if isinstance(regex, str): regex = re.compile(regex) def cleaner(value): if not value: @@ -105,17 +110,13 @@ def test_file(the_file, fformat): try: book = EPUB(the_file.file) except Exception as e: + logger.exception(e) raise ValidationError(_('Are you sure this is an EPUB file?: %s' % e)) - elif fformat == 'mobi': - try: - book = Mobi(the_file.file) - book.parse() - except Exception as e: - raise ValidationError(_('Are you sure this is a MOBI file?: %s' % e)) elif fformat == 'pdf': try: - PdfFileReader(the_file.file) - except Exception, e: + PdfReader(the_file.file) + except Exception as e: + logger.exception(e) raise ValidationError(_('%s is not a valid PDF file' % the_file.name)) return True @@ -146,6 +147,25 @@ def valid_subject(subject_name): return False return True +def explode_bic(subject_name): + subjects = [] + if subject_name.startswith('bic Book Industry Communication::'): + for sub in subject_name.split('::')[1:]: + try: + subjects.append(sub.strip().split(maxsplit=1)[1]) + except IndexError: + continue + else: + subjects = [subject_name] + return subjects + +def explode_bics(subjects): + exploded = [] + for s in subjects: + exploded.extend(explode_bic(s)) + return exploded + + reverse_name_comma = re.compile(r',(?! 
*Jr[\., ])') def unreverse_name(name): @@ -188,6 +208,7 @@ def auth_cleaner(auth): auth = _and_.sub(',', auth) authlist = comma_list_delim.split(auth) for auth in authlist: + auth = remove_author_junk(auth) cleaned.append(spaces.sub(' ', auth.strip())) return cleaned diff --git a/core/views.py b/core/views.py index 3f44d7bca..00f0f60c4 100755 --- a/core/views.py +++ b/core/views.py @@ -12,12 +12,12 @@ def test_read(request): row_id = 1 - print "Attempting to read row" + print("Attempting to read row") # A read the waits for the exclusive lock for the row campaign = Campaign.objects.raw("SELECT * FROM core_campaign WHERE id=%d FOR UPDATE" % row_id)[0] - print "Successfully read row data %d" % campaign.target + print("Successfully read row data %d" % campaign.target) except: traceback.print_exc() @@ -31,7 +31,7 @@ def test_write(request): row_id = 1 campaign = Campaign.objects.get(id=row_id) - print "Attempting to write row via ordinary ORM" + print("Attempting to write row via ordinary ORM") # # Modify the data. This will block if any shared lock (Either FOR UPDATE or LOCK IN SHARED MODE is held @@ -39,7 +39,7 @@ def test_write(request): campaign.target = campaign.target + 10 campaign.save() - print "Successfully write new row data %d" % campaign.target + print("Successfully write new row data %d" % campaign.target) except: traceback.print_exc() @@ -52,11 +52,11 @@ def test_lock(request): try: row_id = 1 - print "Attempting to acquire row lock" + print("Attempting to acquire row lock") campaign = Campaign.objects.raw("SELECT * FROM core_campaign WHERE id=%d FOR UPDATE" % row_id)[0] - print "Row lock acquired, modifying data" + print("Row lock acquired, modifying data") # Modify the data campaign.target = campaign.target + 10 @@ -72,9 +72,9 @@ def test_lock(request): # As soon as the function completes, the transaction will be committed and the lock released. 
# You can modify the commit_on_success decorator to get different transaction behaviors # - print "Thread sleeping for 10 seconds" + print("Thread sleeping for 10 seconds") time.sleep(10) - print "Thread sleep complete" + print("Thread sleep complete") except: traceback.print_exc() diff --git a/deploy/celerybeat b/deploy/celerybeat deleted file mode 100644 index 34b9ad6a4..000000000 --- a/deploy/celerybeat +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# ========================================================= -# celerybeat - Starts the Celery periodic task scheduler. -# ========================================================= -# -# :Usage: /etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status} -# :Configuration file: /etc/default/celerybeat or /etc/default/celeryd -# -# See http://docs.celeryq.org/en/latest/cookbook/daemonizing.html#init-script-celerybeat -# This file is copied from https://github.com/ask/celery/blob/2.4/contrib/generic-init.d/celerybeat - -### BEGIN INIT INFO -# Provides: celerybeat -# Required-Start: $network $local_fs $remote_fs -# Required-Stop: $network $local_fs $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: celery periodic task scheduler -### END INIT INFO - -# Cannot use set -e/bash -e since the kill -0 command will abort -# abnormally in the absence of a valid process ID. -#set -e - -DEFAULT_PID_FILE="/var/run/celerybeat.pid" -DEFAULT_LOG_FILE="/var/log/celerybeat.log" -DEFAULT_LOG_LEVEL="INFO" -DEFAULT_CELERYBEAT="celerybeat" - -# /etc/init.d/ssh: start and stop the celery task worker daemon. - -if test -f /etc/default/celeryd; then - . /etc/default/celeryd -fi - -if test -f /etc/default/celerybeat; then - . 
/etc/default/celerybeat -fi - -CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT} -CELERYBEAT_PID_FILE=${CELERYBEAT_PID_FILE:-${CELERYBEAT_PIDFILE:-$DEFAULT_PID_FILE}} -CELERYBEAT_LOG_FILE=${CELERYBEAT_LOG_FILE:-${CELERYBEAT_LOGFILE:-$DEFAULT_LOG_FILE}} -CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} - -export CELERY_LOADER - -CELERYBEAT_OPTS="$CELERYBEAT_OPTS -f $CELERYBEAT_LOG_FILE -l $CELERYBEAT_LOG_LEVEL" - -if [ -n "$2" ]; then - CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2" -fi - -CELERYBEAT_LOG_DIR=`dirname $CELERYBEAT_LOG_FILE` -CELERYBEAT_PID_DIR=`dirname $CELERYBEAT_PID_FILE` -if [ ! -d "$CELERYBEAT_LOG_DIR" ]; then - mkdir -p $CELERYBEAT_LOG_DIR -fi -if [ ! -d "$CELERYBEAT_PID_DIR" ]; then - mkdir -p $CELERYBEAT_PID_DIR -fi - -# Extra start-stop-daemon options, like user/group. -if [ -n "$CELERYBEAT_USER" ]; then - DAEMON_OPTS="$DAEMON_OPTS --uid $CELERYBEAT_USER" - chown "$CELERYBEAT_USER" $CELERYBEAT_LOG_DIR $CELERYBEAT_PID_DIR -fi -if [ -n "$CELERYBEAT_GROUP" ]; then - DAEMON_OPTS="$DAEMON_OPTS --gid $CELERYBEAT_GROUP" - chgrp "$CELERYBEAT_GROUP" $CELERYBEAT_LOG_DIR $CELERYBEAT_PID_DIR -fi - -CELERYBEAT_CHDIR=${CELERYBEAT_CHDIR:-$CELERYD_CHDIR} -if [ -n "$CELERYBEAT_CHDIR" ]; then - DAEMON_OPTS="$DAEMON_OPTS --workdir $CELERYBEAT_CHDIR" -fi - - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -check_dev_null() { - if [ ! -c /dev/null ]; then - echo "/dev/null is not a character device!" - exit 1 - fi -} - -wait_pid () { - pid=$1 - forever=1 - i=0 - while [ $forever -gt 0 ]; do - kill -0 $pid 1>/dev/null 2>&1 - if [ $? -eq 1 ]; then - echo "OK" - forever=0 - else - kill -TERM "$pid" - i=$((i + 1)) - if [ $i -gt 60 ]; then - echo "ERROR" - echo "Timed out while stopping (30s)" - forever=0 - else - sleep 0.5 - fi - fi - done -} - - -stop_beat () { - echo -n "Stopping celerybeat... 
" - if [ -f "$CELERYBEAT_PID_FILE" ]; then - wait_pid $(cat "$CELERYBEAT_PID_FILE") - else - echo "NOT RUNNING" - fi -} - -start_beat () { - echo "Starting celerybeat..." - if [ -n "$VIRTUALENV" ]; then - source $VIRTUALENV/bin/activate - fi - $CELERYBEAT $CELERYBEAT_OPTS $DAEMON_OPTS --detach \ - --pidfile="$CELERYBEAT_PID_FILE" -} - - - -case "$1" in - start) - check_dev_null - start_beat - ;; - stop) - stop_beat - ;; - reload|force-reload) - echo "Use start+stop" - ;; - restart) - echo "Restarting celery periodic task scheduler" - stop_beat - check_dev_null - start_beat - ;; - - *) - echo "Usage: /etc/init.d/celerybeat {start|stop|restart}" - exit 1 -esac - -exit 0 \ No newline at end of file diff --git a/deploy/celerybeat_just.conf b/deploy/celerybeat_just.conf deleted file mode 100644 index 2e82b9b7d..000000000 --- a/deploy/celerybeat_just.conf +++ /dev/null @@ -1,36 +0,0 @@ -# http://docs.celeryproject.org/en/latest/cookbook/daemonizing.html#generic-initd-celerybeat-example -# to be placed at /etc/defaults/celerybeat - -# Where to chdir at start. -CELERYBEAT_CHDIR="/opt/regluit/" - -# Extra arguments to celerybeat -#CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule" - -# Name of the celery config module.# -CELERY_CONFIG_MODULE="celeryconfig" - -# Name of the projects settings module. -export DJANGO_SETTINGS_MODULE="regluit.settings.just" - -# Path to celerybeat -CELERYBEAT="/opt/regluit/ENV/bin/django-admin.py celerybeat" - -# virtualenv to use -VIRTUALENV="/opt/regluit/ENV" - -#Full path to the PID file. Default is /var/run/celeryd.pid -CELERYBEAT_PIDFILE="/var/log/celerybeat/celerybeat.pid" - -#Full path to the celeryd log file. Default is /var/log/celeryd.log -CELERYBEAT_LOGFILE="/var/log/celerybeat/celerybeat.log" - -#Log level to use for celeryd. Default is INFO. -CELERYBEAT_LOG_LEVEL="INFO" - - -#User to run celeryd as. Default is current user. -#CELERYBEAT_USER - -#Group to run celeryd as. Default is current user. 
-#CELERYBEAT_GROUP diff --git a/deploy/celerybeat_localvm.conf b/deploy/celerybeat_localvm.conf deleted file mode 100644 index 4b61ec86c..000000000 --- a/deploy/celerybeat_localvm.conf +++ /dev/null @@ -1,36 +0,0 @@ -# http://docs.celeryproject.org/en/latest/cookbook/daemonizing.html#generic-initd-celerybeat-example -# to be placed at /etc/defaults/celerybeat - -# Where to chdir at start. -CELERYBEAT_CHDIR="/opt/regluit/" - -# Extra arguments to celerybeat -#CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule" - -# Name of the celery config module.# -CELERY_CONFIG_MODULE="celeryconfig" - -# Name of the projects settings module. -export DJANGO_SETTINGS_MODULE="regluit.settings.localvm" - -# Path to celerybeat -CELERYBEAT="/opt/regluit/ENV/bin/django-admin.py celerybeat" - -# virtualenv to use -VIRTUALENV="/opt/regluit/ENV" - -#Full path to the PID file. Default is /var/run/celeryd.pid -CELERYBEAT_PIDFILE="/var/log/celerybeat/celerybeat.pid" - -#Full path to the celeryd log file. Default is /var/log/celeryd.log -CELERYBEAT_LOGFILE="/var/log/celerybeat/celerybeat.log" - -#Log level to use for celeryd. Default is INFO. -CELERYBEAT_LOG_LEVEL="INFO" - - -#User to run celeryd as. Default is current user. -#CELERYBEAT_USER - -#Group to run celeryd as. Default is current user. -#CELERYBEAT_GROUP diff --git a/deploy/celerybeat_please.conf b/deploy/celerybeat_please.conf deleted file mode 100644 index af4b9a92e..000000000 --- a/deploy/celerybeat_please.conf +++ /dev/null @@ -1,36 +0,0 @@ -# http://docs.celeryproject.org/en/latest/cookbook/daemonizing.html#generic-initd-celerybeat-example -# to be placed at /etc/defaults/celerybeat - -# Where to chdir at start. -CELERYBEAT_CHDIR="/opt/regluit/" - -# Extra arguments to celerybeat -#CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule" - -# Name of the celery config module.# -CELERY_CONFIG_MODULE="celeryconfig" - -# Name of the projects settings module. 
-export DJANGO_SETTINGS_MODULE="regluit.settings.please" - -# Path to celerybeat -CELERYBEAT="/opt/regluit/ENV/bin/django-admin.py celerybeat" - -# virtualenv to use -VIRTUALENV="/opt/regluit/ENV" - -#Full path to the PID file. Default is /var/run/celeryd.pid -CELERYBEAT_PIDFILE="/var/log/celerybeat/celerybeat.pid" - -#Full path to the celeryd log file. Default is /var/log/celeryd.log -CELERYBEAT_LOGFILE="/var/log/celerybeat/celerybeat.log" - -#Log level to use for celeryd. Default is INFO. -CELERYBEAT_LOG_LEVEL="INFO" - - -#User to run celeryd as. Default is current user. -#CELERYBEAT_USER - -#Group to run celeryd as. Default is current user. -#CELERYBEAT_GROUP diff --git a/deploy/celerybeat_prod.conf b/deploy/celerybeat_prod.conf deleted file mode 100644 index aa0e7f941..000000000 --- a/deploy/celerybeat_prod.conf +++ /dev/null @@ -1,35 +0,0 @@ -# http://docs.celeryproject.org/en/latest/cookbook/daemonizing.html#generic-initd-celerybeat-example -# to be placed at /etc/defaults/celerybeat - -# Where to chdir at start. -CELERYBEAT_CHDIR="/opt/regluit/" - -# Extra arguments to celerybeat -#CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule" - -# Name of the celery config module.# -CELERY_CONFIG_MODULE="celeryconfig" - -# Name of the projects settings module. -export DJANGO_SETTINGS_MODULE="regluit.settings.prod" - -# Path to celerybeat -CELERYBEAT="/opt/regluit/ENV/bin/django-admin.py celerybeat" - -# virtualenv to use -VIRTUALENV="/opt/regluit/ENV" - -#Full path to the PID file. Default is /var/run/celeryd.pid -CELERYBEAT_PIDFILE="/var/log/celerybeat/celerybeat.pid" - -#Full path to the celeryd log file. Default is /var/log/celeryd.log -CELERYBEAT_LOGFILE="/var/log/celerybeat/celerybeat.log" - -#Log level to use for celeryd. Default is INFO. -CELERYBEAT_LOG_LEVEL="INFO" - -#User to run celeryd as. Default is current user. -#CELERYBEAT_USER - -#Group to run celeryd as. Default is current user. 
-#CELERYBEAT_GROUP diff --git a/deploy/celerybeat_rydev.conf b/deploy/celerybeat_rydev.conf deleted file mode 100644 index fbde6c92d..000000000 --- a/deploy/celerybeat_rydev.conf +++ /dev/null @@ -1,36 +0,0 @@ -# http://docs.celeryproject.org/en/latest/cookbook/daemonizing.html#generic-initd-celerybeat-example -# to be placed at /etc/defaults/celerybeat - -# Where to chdir at start. -CELERYBEAT_CHDIR="/home/ubuntu/regluit/" - -# Extra arguments to celerybeat -#CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule" - -# Name of the celery config module.# -CELERY_CONFIG_MODULE="celeryconfig" - -# Name of the projects settings module. -export DJANGO_SETTINGS_MODULE="regluit.settings.me" - -# Path to celerybeat -CELERYBEAT="/home/ubuntu/.virtualenvs/regluit/bin/django-admin.py celerybeat" - -# virtualenv to use -VIRTUALENV="/home/ubuntu/.virtualenvs/regluit" - -#Full path to the PID file. Default is /var/run/celeryd.pid -CELERYBEAT_PIDFILE="/var/log/celerybeat/celerybeat.pid" - -#Full path to the celeryd log file. Default is /var/log/celeryd.log -CELERYBEAT_LOGFILE="/var/log/celerybeat/celerybeat.log" - -#Log level to use for celeryd. Default is INFO. -CELERYBEAT_LOG_LEVEL="INFO" - - -#User to run celeryd as. Default is current user. -#CELERYBEAT_USER - -#Group to run celeryd as. Default is current user. -#CELERYBEAT_GROUP diff --git a/deploy/celeryd b/deploy/celeryd deleted file mode 100644 index 12ff8445e..000000000 --- a/deploy/celeryd +++ /dev/null @@ -1,217 +0,0 @@ -#!/bin/bash -# ============================================ -# celeryd - Starts the Celery worker daemon. -# ============================================ -# -# :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status} -# -# :Configuration file: /etc/default/celeryd -# -# To configure celeryd you probably need to tell it where to chdir. 
-# -# EXAMPLE CONFIGURATION -# ===================== -# -# this is an example configuration for a Python project: -# -# /etc/default/celeryd: -# -# # List of nodes to start -# CELERYD_NODES="worker1 worker2 worker3"k -# # ... can also be a number of workers -# CELERYD_NODES=3 -# -# # Where to chdir at start. -# CELERYD_CHDIR="/opt/Myproject/" -# -# # Extra arguments to celeryd -# CELERYD_OPTS="--time-limit=300" -# -# # Name of the celery config module.# -# CELERY_CONFIG_MODULE="celeryconfig" -# -# EXAMPLE DJANGO CONFIGURATION -# ============================ -# -# # Where the Django project is. -# CELERYD_CHDIR="/opt/Project/" -# -# # Name of the projects settings module. -# export DJANGO_SETTINGS_MODULE="settings" -# -# # Path to celeryd -# CELERYD="/opt/Project/manage.py celeryd" -# -# AVAILABLE OPTIONS -# ================= -# -# * CELERYD_NODES -# -# A space separated list of nodes, or a number describing the number of -# nodes, to start -# -# * CELERYD_OPTS -# Additional arguments to celeryd-multi, see `celeryd-multi --help` -# and `celeryd --help` for help. -# -# * CELERYD_CHDIR -# Path to chdir at start. Default is to stay in the current directory. -# -# * CELERYD_PIDFILE -# Full path to the pidfile. Default is /var/run/celeryd.pid. -# -# * CELERYD_LOGFILE -# Full path to the celeryd logfile. Default is /var/log/celeryd.log -# -# * CELERYD_LOG_LEVEL -# Log level to use for celeryd. Default is INFO. -# -# * CELERYD -# Path to the celeryd program. Default is `celeryd`. -# You can point this to an virtualenv, or even use manage.py for django. -# -# * CELERYD_USER -# User to run celeryd as. Default is current user. -# -# * CELERYD_GROUP -# Group to run celeryd as. Default is current user. 
- -# VARIABLE EXPANSION -# ================== -# -# The following abbreviations will be expanded -# -# * %n -> node name -# * %h -> host name - - -### BEGIN INIT INFO -# Provides: celeryd -# Required-Start: $network $local_fs $remote_fs -# Required-Stop: $network $local_fs $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: celery task worker daemon -### END INIT INFO - -#set -e - -DEFAULT_PID_FILE="/var/run/celeryd@%n.pid" -DEFAULT_LOG_FILE="/var/log/celeryd@%n.log" -DEFAULT_LOG_LEVEL="INFO" -DEFAULT_NODES="celery" -DEFAULT_CELERYD="-m celery.bin.celeryd_detach" - -# /etc/init.d/celeryd: start and stop the celery task worker daemon. - -CELERY_DEFAULTS=${CELERY_DEFAULTS:-"/etc/default/celeryd"} - -test -f "$CELERY_DEFAULTS" && . "$CELERY_DEFAULTS" -if [ -f "/etc/default/celeryd" ]; then - . /etc/default/celeryd -fi - -if [ -f $VIRTUALENV_ACTIVATE ]; then - echo "activating virtualenv $VIRTUALENV_ACTIVATE" - source "$VIRTUALENV_ACTIVATE" -fi - -CELERYD_PID_FILE=${CELERYD_PID_FILE:-${CELERYD_PIDFILE:-$DEFAULT_PID_FILE}} -CELERYD_LOG_FILE=${CELERYD_LOG_FILE:-${CELERYD_LOGFILE:-$DEFAULT_LOG_FILE}} -CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} -CELERYD_MULTI=${CELERYD_MULTI:-"celeryd-multi"} -CELERYD=${CELERYD:-$DEFAULT_CELERYD} -CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES} - -export CELERY_LOADER - -if [ -n "$2" ]; then - CELERYD_OPTS="$CELERYD_OPTS $2" -fi - -# Extra start-stop-daemon options, like user/group. -if [ -n "$CELERYD_USER" ]; then - DAEMON_OPTS="$DAEMON_OPTS --uid=$CELERYD_USER" -fi -if [ -n "$CELERYD_GROUP" ]; then - DAEMON_OPTS="$DAEMON_OPTS --gid=$CELERYD_GROUP" -fi - -if [ -n "$CELERYD_CHDIR" ]; then - DAEMON_OPTS="$DAEMON_OPTS --workdir=\"$CELERYD_CHDIR\"" -fi - - -check_dev_null() { - if [ ! -c /dev/null ]; then - echo "/dev/null is not a character device!" 
- exit 1 - fi -} - - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - - -stop_workers () { - $CELERYD_MULTI stop $CELERYD_NODES --pidfile="$CELERYD_PID_FILE" -} - - -start_workers () { - $CELERYD_MULTI start $CELERYD_NODES $DAEMON_OPTS \ - --pidfile="$CELERYD_PID_FILE" \ - --logfile="$CELERYD_LOG_FILE" \ - --loglevel="$CELERYD_LOG_LEVEL" \ - --cmd="$CELERYD" \ - $CELERYD_OPTS -} - - -restart_workers () { - $CELERYD_MULTI restart $CELERYD_NODES $DAEMON_OPTS \ - --pidfile="$CELERYD_PID_FILE" \ - --logfile="$CELERYD_LOG_FILE" \ - --loglevel="$CELERYD_LOG_LEVEL" \ - --cmd="$CELERYD" \ - $CELERYD_OPTS -} - - - -case "$1" in - start) - check_dev_null - start_workers - ;; - - stop) - check_dev_null - stop_workers - ;; - - reload|force-reload) - echo "Use restart" - ;; - - status) - celeryctl status - ;; - - restart) - check_dev_null - restart_workers - ;; - - try-restart) - check_dev_null - restart_workers - ;; - - *) - echo "Usage: /etc/init.d/celeryd {start|stop|restart|try-restart|kill}" - exit 1 - ;; -esac - -exit 0 diff --git a/deploy/celeryd_just.conf b/deploy/celeryd_just.conf deleted file mode 100644 index 3e1ee05e2..000000000 --- a/deploy/celeryd_just.conf +++ /dev/null @@ -1,11 +0,0 @@ -CELERYD_NODES="w1" -CELERYD_CHDIR="/opt/regluit/" -CELERYD_LOG_FILE="/var/log/celery/%n.log" -CELERYD_PID_FILE="/var/log/celery/%n.pid" -CELERYD_USER="celery" -CELERYD_GROUP="celery" -CELERYD="/opt/regluit/ENV/bin/django-admin.py celeryd" -CELERYD_MULTI="/opt/regluit/ENV/bin/django-admin.py celeryd_multi" - -VIRTUALENV_ACTIVATE="/opt/regluit/ENV/bin/activate" -export DJANGO_SETTINGS_MODULE="regluit.settings.just" diff --git a/deploy/celeryd_localvm.conf b/deploy/celeryd_localvm.conf deleted file mode 100644 index b0ba83243..000000000 --- a/deploy/celeryd_localvm.conf +++ /dev/null @@ -1,11 +0,0 @@ -CELERYD_NODES="w1" -CELERYD_CHDIR="/opt/regluit/" -CELERYD_LOG_FILE="/var/log/celery/%n.log" -CELERYD_PID_FILE="/var/log/celery/%n.pid" -CELERYD_USER="celery" -CELERYD_GROUP="celery" 
-CELERYD="/opt/regluit/ENV/bin/django-admin.py celeryd" -CELERYD_MULTI="/opt/regluit/ENV/bin/django-admin.py celeryd_multi" - -VIRTUALENV_ACTIVATE="/opt/regluit/ENV/bin/activate" -export DJANGO_SETTINGS_MODULE="regluit.settings.localvm" diff --git a/deploy/celeryd_please.conf b/deploy/celeryd_please.conf deleted file mode 100644 index 8c41c95ac..000000000 --- a/deploy/celeryd_please.conf +++ /dev/null @@ -1,11 +0,0 @@ -CELERYD_NODES="w1" -CELERYD_CHDIR="/opt/regluit/" -CELERYD_LOG_FILE="/var/log/celery/%n.log" -CELERYD_PID_FILE="/var/log/celery/%n.pid" -CELERYD_USER="celery" -CELERYD_GROUP="celery" -CELERYD="/opt/regluit/ENV/bin/django-admin.py celeryd" -CELERYD_MULTI="/opt/regluit/ENV/bin/django-admin.py celeryd_multi" - -VIRTUALENV_ACTIVATE="/opt/regluit/ENV/bin/activate" -export DJANGO_SETTINGS_MODULE="regluit.settings.please" diff --git a/deploy/celeryd_prod.conf b/deploy/celeryd_prod.conf deleted file mode 100644 index 476e607fc..000000000 --- a/deploy/celeryd_prod.conf +++ /dev/null @@ -1,11 +0,0 @@ -CELERYD_NODES="w1" -CELERYD_CHDIR="/opt/regluit/" -CELERYD_LOG_FILE="/var/log/celery/%n.log" -CELERYD_PID_FILE="/var/log/celery/%n.pid" -CELERYD_USER="celery" -CELERYD_GROUP="celery" -CELERYD="/opt/regluit/ENV/bin/django-admin.py celeryd" -CELERYD_MULTI="/opt/regluit/ENV/bin/django-admin.py celeryd_multi" - -VIRTUALENV_ACTIVATE="/opt/regluit/ENV/bin/activate" -export DJANGO_SETTINGS_MODULE="regluit.settings.prod" diff --git a/deploy/celeryd_rydev.conf b/deploy/celeryd_rydev.conf deleted file mode 100755 index 9c89e6ece..000000000 --- a/deploy/celeryd_rydev.conf +++ /dev/null @@ -1,11 +0,0 @@ -CELERYD_NODES="w1" -CELERYD_CHDIR="/home/ubuntu/regluit/" -CELERYD_LOG_FILE="/var/log/celery/%n.log" -CELERYD_PID_FILE="/var/log/celery/%n.pid" -CELERYD_USER="celery" -CELERYD_GROUP="celery" -CELERYD="/home/ubuntu/.virtualenvs/regluit/bin/django-admin.py celeryd" -CELERYD_MULTI="/home/ubuntu/.virtualenvs/regluit/bin/django-admin.py celeryd_multi" - 
-VIRTUALENV_ACTIVATE="/home/ubuntu/.virtualenvs/regluit/bin/activate" -export DJANGO_SETTINGS_MODULE="regluit.settings.me" diff --git a/deploy/crontab_just.txt b/deploy/crontab_just.txt deleted file mode 100644 index 4b8386fec..000000000 --- a/deploy/crontab_just.txt +++ /dev/null @@ -1,24 +0,0 @@ -# Edit this file to introduce tasks to be run by cron. -# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -# m h dom mon dow command -* * * * * cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py emit_notices --settings=regluit.settings.just > /opt/regluit/deploy/emit_notices.log 2>&1 ; touch /opt/regluit/deploy/last-cron -@reboot sudo mkdir /var/run/celery; sudo chown celery:celery /var/log/celery /var/run/celery; cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py celeryd_multi restart w1 --settings=regluit.settings.just; /etc/init.d/celerybeat restart; diff --git a/deploy/crontab_localvm.txt b/deploy/crontab_localvm.txt deleted file mode 100644 index ef35f2a16..000000000 --- a/deploy/crontab_localvm.txt +++ /dev/null @@ -1,24 +0,0 @@ -# Edit this file to introduce tasks to be run by cron. 
-# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -# m h dom mon dow command -* * * * * cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py emit_notices --settings=regluit.settings.localvm > /opt/regluit/deploy/emit_notices.log 2>&1 ; touch /opt/regluit/deploy/last-cron -@reboot sudo mkdir /var/run/celery; sudo chown celery:celery /var/log/celery /var/run/celery; cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py celeryd_multi restart w1 --settings=regluit.settings.localvm; /etc/init.d/celerybeat restart; \ No newline at end of file diff --git a/deploy/crontab_please.txt b/deploy/crontab_please.txt deleted file mode 100644 index bd975eec6..000000000 --- a/deploy/crontab_please.txt +++ /dev/null @@ -1,24 +0,0 @@ -# Edit this file to introduce tasks to be run by cron. 
-# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -# m h dom mon dow command -* * * * * cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py emit_notices --settings=regluit.settings.please > /opt/regluit/deploy/emit_notices.log 2>&1 ; touch /opt/regluit/deploy/last-cron -@reboot sudo mkdir /var/run/celery; sudo chown celery:celery /var/log/celery /var/run/celery; cd /opt/regluit; . /opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py celeryd_multi restart w1 --settings=regluit.settings.please; /etc/init.d/celerybeat restart; diff --git a/deploy/crontab_prod.txt b/deploy/crontab_prod.txt deleted file mode 100644 index ca416808d..000000000 --- a/deploy/crontab_prod.txt +++ /dev/null @@ -1,24 +0,0 @@ -# Edit this file to introduce tasks to be run by cron. 
-# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -# m h dom mon dow command -* * * * * /opt/regluit/deploy/emit_notices.sh -@reboot sudo mkdir /var/run/celery; sudo chown celery:celery /var/log/celery /var/run/celery; cd /opt/regluit; . 
/opt/regluit/ENV/bin/activate; /opt/regluit/ENV/bin/django-admin.py celeryd_multi restart w1 --settings=regluit.settings.prod; /etc/init.d/celerybeat restart; diff --git a/deploy/localvm.conf b/deploy/localvm.conf deleted file mode 100644 index e862ef813..000000000 --- a/deploy/localvm.conf +++ /dev/null @@ -1,59 +0,0 @@ -WSGIPythonHome /opt/regluit/ENV -WSGISocketPrefix /opt/regluit - -<VirtualHost *:80> - -ServerName localvm -ServerAdmin info@ebookfoundation.org - -Redirect permanent / https://192.168.33.10.xip.io:443/ - -</VirtualHost> - -<VirtualHost _default_:443> - -SSLEngine on -ServerName localvm:443 - -# generated using https://mozilla.github.io/server-side-tls/ssl-config-generator/ -# intermediate mode -# 2015.03.04 (with Apache v 2.2.22 and OpenSSL 1.0.1 and HSTS enabled) - -SSLProtocol all -SSLv2 -SSLv3 -SSLCipherSuite ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA -SSLHonorCipherOrder on - -# HSTS (mod_headers is required) (15768000 seconds = 6 months) -Header always add Strict-Transport-Security "max-age=15768000" - -SSLCertificateFile /etc/ssl/certs/server.crt -SSLCertificateKeyFile /etc/ssl/private/server.key - -WSGIDaemonProcess regluit-ssl processes=4 threads=4 python-eggs=/tmp/regluit-python-eggs -WSGIScriptAlias / /opt/regluit/deploy/localvm.wsgi - -<Directory /opt/regluit> - Options 
Indexes FollowSymLinks - AllowOverride None - - Order allow,deny - Allow from all -</Directory> - -<Directory /opt/regluit/static> - Options Indexes FollowSymLinks - AllowOverride None - - Order allow,deny - Allow from all -</Directory> - -Alias /static /var/www/static - -BrowserMatch "MSIE [2-6]" \ - nokeepalive ssl-unclean-shutdown \ - downgrade-1.0 force-response-1.0 -# MSIE 7 and newer should be able to use keepalive -BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown - -</VirtualHost> diff --git a/deploy/setup-just.sh b/deploy/setup-just.sh deleted file mode 100644 index 216f7a8cb..000000000 --- a/deploy/setup-just.sh +++ /dev/null @@ -1,3 +0,0 @@ -cd /opt/regluit -source ENV/bin/activate -export DJANGO_SETTINGS_MODULE=regluit.settings.just diff --git a/deploy/setup-please.sh b/deploy/setup-please.sh deleted file mode 100644 index 0815d3612..000000000 --- a/deploy/setup-please.sh +++ /dev/null @@ -1,3 +0,0 @@ -cd /opt/regluit -source ENV/bin/activate -export DJANGO_SETTINGS_MODULE=regluit.settings.please diff --git a/distro/migrations/0001_initial.py b/distro/migrations/0001_initial.py index 1a4fbca15..44ef8b67f 100644 --- a/distro/migrations/0001_initial.py +++ b/distro/migrations/0001_initial.py @@ -41,6 +41,6 @@ class Migration(migrations.Migration): migrations.AddField( model_name='deposit', name='target', - field=models.ForeignKey(related_name='deposits', to='distro.Target'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='deposits', to='distro.Target'), ), ] diff --git a/distro/migrations/0002_auto_20200214_1347.py b/distro/migrations/0002_auto_20200214_1347.py new file mode 100644 index 000000000..a93207a12 --- /dev/null +++ b/distro/migrations/0002_auto_20200214_1347.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.28 on 2020-02-14 13:47 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('distro', '0001_initial'), 
+ ] + + operations = [ + migrations.AlterField( + model_name='target', + name='protocol', + field=models.CharField(default='ftp', max_length=10), + ), + ] diff --git a/distro/models.py b/distro/models.py index 5e9adb4b0..bb5df592e 100644 --- a/distro/models.py +++ b/distro/models.py @@ -1,7 +1,7 @@ import logging import requests from ftplib import FTP, FTP_TLS -from StringIO import StringIO +from io import StringIO from django.db import models logger = logging.getLogger(__name__) @@ -14,7 +14,7 @@ class Target(models.Model): protocol = models.CharField(max_length=10, default='ftp') formats = models.ManyToManyField('Format', related_name='targets') - def __unicode__(self): + def __str__(self): return self.name def get_ftp(self): @@ -57,7 +57,7 @@ def push_file(self, filename, file_to_push): class Deposit(models.Model): - target = models.ForeignKey(Target, related_name="deposits") + target = models.ForeignKey(Target, on_delete=models.CASCADE, related_name="deposits") isbn = models.CharField(max_length=13) format = models.CharField(max_length=30) updated = models.DateTimeField(auto_now_add=True) @@ -65,5 +65,5 @@ class Deposit(models.Model): class Format(models.Model): name = models.CharField(max_length=4) - def __unicode__(self): + def __str__(self): return self.name diff --git a/distro/push.py b/distro/push.py index 30c4fb4b1..4a485ff95 100644 --- a/distro/push.py +++ b/distro/push.py @@ -1,11 +1,12 @@ import logging from datetime import datetime -from StringIO import StringIO +from io import StringIO from regluit.core.facets import BaseFacet -from regluit.core.models import Work, good_providers +from regluit.core.models import Work from regluit.api.onix import onix_feed +from regluit.core.parameters import GOOD_PROVIDERS from .models import Target @@ -45,7 +46,7 @@ def __init__(self): editions__ebooks__created__gt = start, identifiers__type="isbn", editions__ebooks__format__in = formats, - editions__ebooks__provider__in = good_providers, + 
editions__ebooks__provider__in = GOOD_PROVIDERS, ).distinct().order_by('-featured') model_filters = {"Ebook": format_filter, "Edition": edition_format_filter} @@ -56,7 +57,10 @@ def __init__(self): return TargetFacet() def push_onix(target, facet_class): - target.push_file('unglueit_onix_{:%Y%m%d%H%M%S}.xml'.format(datetime.now()),StringIO(onix_feed(facet_class))) + target.push_file( + 'unglueit_onix_{:%Y%m%d%H%M%S}.xml'.format(datetime.now()), + StringIO(onix_feed(facet_class)) + ) def push_all(start=datetime(1900,1,1), new=False, max=0): for target in Target.objects.all(): diff --git a/frontend/forms/__init__.py b/frontend/forms/__init__.py index 59b132793..5f7afb8e6 100644 --- a/frontend/forms/__init__.py +++ b/frontend/forms/__init__.py @@ -12,8 +12,7 @@ from django import forms from django.conf import settings from django.contrib.auth.models import User -from django.forms.widgets import RadioSelect -from django.forms.extras.widgets import SelectDateWidget +from django.forms.widgets import RadioSelect, SelectDateWidget from django.utils.translation import ugettext_lazy as _ from selectable.forms import ( @@ -38,8 +37,6 @@ Work, Press, Libpref, - TWITTER, - FACEBOOK, UNGLUEITAR ) from regluit.libraryauth.models import Library @@ -48,6 +45,7 @@ REWARDS, BUY2UNGLUE, THANKS, + DONATION_CHOICES, ) from regluit.core.lookups import ( OwnerLookup, @@ -70,33 +68,9 @@ RightsHolderForm, UserClaimForm ) -from questionnaire.models import Questionnaire logger = logging.getLogger(__name__) -class SurveyForm(forms.Form): - label = forms.CharField(max_length=64, required=True) - survey = forms.ModelChoiceField(Questionnaire.objects.all(), widget=RadioSelect(), empty_label=None, required = True,) - isbn = ISBNField( - label=_("ISBN"), - max_length=17, - required = False, - help_text = _("13 digits, no dash."), - error_messages = { - 'invalid': _("This must be a valid ISBN-13."), - } - ) - - def clean_isbn(self): - isbn = self.cleaned_data['isbn'] - if not isbn: - return '' - 
try: - self.work = Identifier.objects.get(type='isbn', value=isbn).work - return isbn - except Identifier.DoesNotExist: - self.work = None - raise forms.ValidationError( 'That ISBN is not in our database') class EbookFileForm(forms.ModelForm): file = forms.FileField(max_length=16777216) @@ -110,7 +84,7 @@ def __init__(self, campaign_type=BUY2UNGLUE, *args, **kwargs): self.fields['format'].widget = forms.HiddenInput() if campaign_type == THANKS: self.fields['format'].widget = forms.Select( - choices = (('pdf', 'PDF'), ('epub', 'EPUB'), ('mobi', 'MOBI')) + choices = (('pdf', 'PDF'), ('epub', 'EPUB')) ) def clean_version_label(self): @@ -182,13 +156,11 @@ def clean(self): return self.cleaned_data class ProfileForm(forms.ModelForm): - clear_facebook = forms.BooleanField(required=False) clear_twitter = forms.BooleanField(required=False) - clear_goodreads = forms.BooleanField(required=False) class Meta: model = UserProfile - fields = 'tagline', 'librarything_id', 'home_url', 'clear_facebook', 'clear_twitter', 'clear_goodreads', 'avatar_source' + fields = 'tagline', 'librarything_id', 'home_url', 'avatar_source' widgets = { 'tagline': forms.Textarea(attrs={'rows': 5, 'onKeyUp': "counter(this, 140)", 'onBlur': "counter(this, 140)"}), } @@ -198,22 +170,9 @@ def __init__(self, *args, **kwargs): super(ProfileForm, self).__init__(*args, **kwargs) choices = [] for choice in self.fields['avatar_source'].choices : - if choice[0] == FACEBOOK and not profile.facebook_id: - pass - elif choice[0] == TWITTER and not profile.twitter_id: - pass - else: - choices.append(choice) + choices.append(choice) self.fields['avatar_source'].choices = choices - def clean(self): - # check that if a social net is cleared, we're not using it a avatar source - if self.cleaned_data.get("clear_facebook", False) and self.cleaned_data.get("avatar_source", None) == FACEBOOK: - self.cleaned_data["avatar_source"] == UNGLUEITAR - if self.cleaned_data.get("clear_twitter", False) and 
self.cleaned_data.get("avatar_source", None) == TWITTER: - self.cleaned_data["avatar_source"] == UNGLUEITAR - return self.cleaned_data - def getTransferCreditForm(maximum, data=None, *args, **kwargs ): class TransferCreditForm(forms.Form): recipient = AutoCompleteSelectField( @@ -292,9 +251,7 @@ class Meta: class CampaignPurchaseForm(forms.Form): - anonymous = forms.BooleanField(required=False, - label_suffix='', - label=_("Make this purchase anonymous")) + anonymous = forms.BooleanField(required=False, label=_("Make this purchase anonymous, please")) offer_id = forms.IntegerField(required=False) offer = None library_id = forms.IntegerField(required=False) @@ -359,12 +316,11 @@ def trans_extra(self): class CampaignThanksForm(forms.Form): anonymous = forms.BooleanField( required=False, - label_suffix='', - label=_("Make this contribution anonymous") + label=_("Make this contribution anonymous, please") ) preapproval_amount = forms.DecimalField( required = True, - min_value=D('1.00'), + min_value=D('2.00'), max_value=D('2000.00'), decimal_places=2, label="Pledge Amount", @@ -374,19 +330,21 @@ def trans_extra(self): pe = PledgeExtra( anonymous=self.cleaned_data['anonymous'] ) class DonationForm(forms.Form): + # used only for validation; not currently used for display amount = forms.DecimalField( required = True, - min_value=D('1.00'), + min_value=D('5.00'), max_value=D('20000.00'), decimal_places=2, label="Donation Amount", ) + reason = forms.ChoiceField(choices=DONATION_CHOICES, required=False) class CampaignPledgeForm(forms.Form): preapproval_amount = forms.DecimalField( required = False, - min_value=D('1.00'), + min_value=D('2.00'), max_value=D('5000.00'), decimal_places=2, label="Support Amount", @@ -394,10 +352,7 @@ class CampaignPledgeForm(forms.Form): def amount(self): return self.cleaned_data["preapproval_amount"] if self.cleaned_data else None - anonymous = forms.BooleanField( - required=False, - label_suffix='', - label=_("Make this support anonymous")) + 
anonymous = forms.BooleanField(required=False, label=_("Make this support anonymous, please")) ack_name = forms.CharField( required=False, max_length=64, @@ -454,6 +409,7 @@ class TokenCCMixin(forms.Form): class BaseCCMixin(forms.Form): work_id = forms.IntegerField(required=False, widget=forms.HiddenInput()) + reason = forms.CharField(required=False, widget=forms.HiddenInput()) preapproval_amount = forms.DecimalField( required=False, min_value=D('1.00'), @@ -479,11 +435,6 @@ class CCForm(UserCCMixin, BaseCCForm): class AccountCCForm( BaseCCMixin, UserCCMixin, forms.Form): pass -class GoodreadsShelfLoadingForm(forms.Form): - goodreads_shelf_name_number = forms.CharField(widget=forms.Select(choices=( - ('all','all'), - ))) - class LibraryThingForm(forms.Form): lt_username = forms.CharField(max_length=30, required=True) @@ -548,14 +499,14 @@ class MsgForm(forms.Form): def full_clean(self): super(MsgForm, self).full_clean() - if self.data.has_key("supporter"): + if "supporter" in self.data: try: self.cleaned_data['supporter'] = User.objects.get(id=self.data["supporter"]) except User.DoesNotExist: raise ValidationError("Supporter does not exist") else: raise ValidationError("Supporter is not specified") - if self.data.has_key("work"): + if "work" in self.data: try: self.cleaned_data['work'] = Work.objects.get(id=self.data["work"]) except Work.DoesNotExist: diff --git a/frontend/forms/bibforms.py b/frontend/forms/bibforms.py index 54881461b..873a676a3 100644 --- a/frontend/forms/bibforms.py +++ b/frontend/forms/bibforms.py @@ -62,7 +62,10 @@ def clean(self): id_value = self.cleaned_data.get('id_value', '').strip() make_new = self.cleaned_data.get('make_new', False) if not make_new: - self.cleaned_data['id_value'] = identifier_cleaner(id_type)(id_value) + if id_value: + self.cleaned_data['id_value'] = identifier_cleaner(id_type)(id_value) + if not self.cleaned_data['id_value']: + self.add_error('id_value', forms.ValidationError('The identifier was not valid')) return 
self.cleaned_data class Meta: @@ -165,7 +168,7 @@ def clean(self): else: err_msg = "{} is a duplicate for work #{}.".format(id_value, ident.work_id) self.add_error('id_value', forms.ValidationError(err_msg)) - except forms.ValidationError, ve: + except forms.ValidationError as ve: self.add_error( 'id_value', forms.ValidationError('{}: {}'.format(ve.message, id_value)) diff --git a/frontend/forms/rh_forms.py b/frontend/forms/rh_forms.py index 09a9212dd..1a172107c 100644 --- a/frontend/forms/rh_forms.py +++ b/frontend/forms/rh_forms.py @@ -1,7 +1,7 @@ from datetime import date, timedelta from decimal import Decimal as D -from ckeditor.widgets import CKEditorWidget +from ckeditor_uploader.widgets import CKEditorUploadingWidget from selectable.forms import ( AutoCompleteSelectMultipleWidget, @@ -10,8 +10,7 @@ from django import forms from django.conf import settings -from django.forms.extras.widgets import SelectDateWidget -from django.forms.widgets import RadioSelect +from django.forms.widgets import RadioSelect, SelectDateWidget from django.utils.translation import ugettext_lazy as _ from django.utils.timezone import now @@ -206,7 +205,7 @@ def __init__(self, instance=None , **kwargs): required=False, ) if self.initial and not self.initial.get('edition', None) and not instance.edition: - self.initial['edition'] = instance.work.editions.all()[0] + self.initial['edition'] = instance.work.editions.first() paypal_receiver = forms.EmailField( label=_("contact email address for this campaign"), @@ -215,7 +214,7 @@ def __init__(self, instance=None , **kwargs): 'required': 'You must enter the email we should contact you at for this campaign.' 
}, ) - work_description = forms.CharField(required=False , widget=CKEditorWidget()) + work_description = forms.CharField(required=False , widget=CKEditorUploadingWidget()) class Meta: model = Campaign diff --git a/frontend/tasks.py b/frontend/tasks.py new file mode 100644 index 000000000..75c24cae6 --- /dev/null +++ b/frontend/tasks.py @@ -0,0 +1,17 @@ +from os.path import join +from datetime import date + +from celery.task import task + +from django.conf import settings +from django.template.loader import get_template, render_to_string + +from .views import InfoPageView + +@task +def save_info_page(): + page_view = InfoPageView() + page = render_to_string(page_view.template_name, context=page_view.get_context_data()) + today = date.today().isoformat() + with open(join(settings.CELERY_LOG_DIR, 'metrics-%s.html' % today), 'w') as todays_metrics: + todays_metrics.write(page) diff --git a/frontend/templates/503.html b/frontend/templates/503.html index 4403b25d5..e2b47b1ec 100644 --- a/frontend/templates/503.html +++ b/frontend/templates/503.html @@ -11,7 +11,7 @@ <h1>Unglue.it is currently undergoing maintenance</h1> <p> -While you wait, why not like us on <a href="https://facebook.com/unglueit">Facebook</a>, follow us on <a href="https://twitter.com/unglueit">Twitter</a>, or subscribe to our <a href="https://blog.unglue.it">blog</a>? We'll keep you up to date there with our progress fixing things. +While you wait, why not like us on <a href="https://facebook.com/unglueit">Facebook</a>, follow us on <a href="https://digipres.club/@unglueit">Mastodon</a>,h or subscribe to our <a href="https://blog.unglue.it">blog</a>? We'll keep you up to date there with our progress fixing things. 
</p> <p>You can also help us by <a href="{% url 'feedback' %}">sending us feedback</a>.</p> diff --git a/frontend/templates/_template_map.txt b/frontend/templates/_template_map.txt index fe1abdb94..bb9f5dffa 100644 --- a/frontend/templates/_template_map.txt +++ b/frontend/templates/_template_map.txt @@ -11,13 +11,11 @@ base.html extra_css(empty) extra_js(empty) extra_head(empty) about_unglued_empty.html about_wishlist.html about_wishlist_empty.html - base-questionnaire.html campaign_list.html extra_css extra_head cc_list.html extra_css extra_head comments.html extra_css extra_head download.html extra_js faceted_list.html extra_css extra_head - goodreads_display.html extra_head home.html extra_css extra_js kindle_change_successful.html extra_js libraryauth/library.html extra_head extra_css extra_js @@ -59,7 +57,6 @@ base.html extra_css(empty) extra_js(empty) extra_head(empty) manage_account.html extra_extra_head manage_campaign.html extra_extra_head manage_ebooks.html - manage_survey.html marc.html merge.html extra_extra_head metrics.html @@ -78,7 +75,6 @@ base.html extra_css(empty) extra_js(empty) extra_head(empty) rh_works.html rh_yours.html rights_holders.html extra_extra_head - surveys.html terms.html extra_css thanks.html basepledge.html extra_css extra_js extra_extra_head(empty) diff --git a/frontend/templates/about_funds.html b/frontend/templates/about_funds.html new file mode 100644 index 000000000..231cb6970 --- /dev/null +++ b/frontend/templates/about_funds.html @@ -0,0 +1,66 @@ +{% extends 'basedocumentation.html' %} +{% load sass_tags %} +{% block title %} Free Ebook Foundation Donations {% endblock %} +{% block extra_css %} +<link type="text/css" rel="stylesheet" href="{% sass_src 'scss/pledge.scss' %}" /> +{% endblock %} + +{% block topsection %} +{% endblock %} + +{% block doccontent %} + +<h2> Donating to Unglue.it </h2> +<p> +Unglue.it is a program of the <a href="https://ebookfoundation.org">Free Ebook Foundation</a> (FEF), which is a charitable, 
not-for-profit corporation. +Donations to the Free Ebook Foundation are tax-deductible in the United States. +</p> +<p> +When you donate to the Free Ebook Foundation, you can specify how you would like your donation to be used. There are currently two options: +</p> +<ul class="bullets"> +<li> <a href="#monographs">The FEF Open Access Monographs Fund</a>: to support the Ungluing of peer-reviewed monographs that advance scholarship, science and learning </li> +<li> <a href="#general">The FEF General Fund</a>: to support the operation and maintenance of the Foundation's programs, including Unglue.it. +</ul> +<h3 id="monographs">The FEF Open Access Monograph Fund</h3> +<p> +Scholars write books to spread their ideas, so it makes sense to make them free and available. +We refer to these books as "monographs" because they usually embody the scholarship of a single author. +Already, over 30,000 of these books are available to download from the Unglue.it database. +Sadly, many more books are locked up behind paywalls - not because their authors want to make money, but because the publishers of these books need to recoup the cost of editorial work and design. +Many new books will remain unpublished because publishers committed to Open Access have insufficient resources to publish all the books deserving of wider audiences. +</p> +<p> +As a small step towards addressing these needs, we're offering donors a chance to help us unglue more of these monographs by donating to a special fund. +The fund will be used to match contributions to qualified ungluing campaigns on Unglue.it. +To participate, authors should first work with a publisher to establish a campaign target, and then create an ungluing campaign. +To get started, follow the steps at our <a href="{% url 'rightsholders' %}">right holder tools page</a>. +Our staff will verify that the book has been or will be peer-reviewed and advances scholarship, science and learning. 
+Resources from the fund will be allocated to maximize the success of the eligible campaigns. +If you want to donate to a specific campaign, just donate to the campaign directly. +</p> +<h3 id="general">The FEF General Fund</h3> +<p> +If you prefer to support all the work of the Free Ebook Foundation, including Unglue.it, Free-Programming-Books, and our work supporting Project Gutenberg, just use the General Fund. +</p> + +<h2 id="donationform">Donate Now!</h2> + <div id="authorize" class="jsmod-content" > + <form class="askform" method="POST" action="{% url 'newdonation' %}"> + <p class=" form-row clearfix"> + <input id="id_reason_monographs" checked type="radio" value="monographs" name="reason"><label for="id_reason_monographs">FEF Open Access Monographs Fund</label> + </p> <p class=" form-row clearfix"> + <input id="id_reason_general" type="radio" value="general" name="reason"><label for="id_reason_general">FEF General Fund</label> + </p> + <div class="donate_amount clearfix"> + <label>Amount ($): </label><input id="amount" max="20000.00" min="5.00" name="amount" step="0.01" type="number" value="10.00" class="donate"></div> + <div class="button"> + <input name="pledge" type="submit" value="Donate" id="donatesubmit" class="donate" /> + </div> + + </form> + + </div> + + +{% endblock %} diff --git a/frontend/templates/add_your_books.html b/frontend/templates/add_your_books.html index b285e3d5d..99915eb51 100644 --- a/frontend/templates/add_your_books.html +++ b/frontend/templates/add_your_books.html @@ -1,3 +1,5 @@ + <script type="text/javascript" src="/static/js/watermark_init.js"></script> + <script type="text/javascript" src="/static/js/watermark_change.js"></script> <p id="add_your_books"><b>Claiming a work</b></p> <p>If your book is indexed in Google books, we can add it to our database automagically. 
Click on the result list to add your book to our database.</p> <form action="{% url 'search' %}" method="get"> diff --git a/frontend/templates/agreed.html b/frontend/templates/agreed.html index f2d8535b0..4b16772b1 100644 --- a/frontend/templates/agreed.html +++ b/frontend/templates/agreed.html @@ -3,7 +3,7 @@ {% block title %} Agreement Submitted {% endblock %} {% block extra_extra_head %} {{ block.super }} -<link rel="stylesheet" href="/static/css/ui-lightness/jquery-ui-1.8.16.custom.css" type="text/css" media="screen"> +<link rel="stylesheet" href="{{ jquery_ui_theme }}" type="text/css" media="screen"> <script type="text/javascript" src="{{ jquery_ui_home }}"></script> {% endblock %} diff --git a/frontend/templates/base-questionnaire.html b/frontend/templates/base-questionnaire.html deleted file mode 100644 index 961b60a60..000000000 --- a/frontend/templates/base-questionnaire.html +++ /dev/null @@ -1,38 +0,0 @@ -{% extends "base.html" %} -{% load landings %} -{% block title %}{{ block.super }}Questionnaire{% endblock title %} -{% block search_box %} -{% render_with_landing '' %} -<a href="{{landing_object.publishers.0.url}}"><img style="float:left;margin:10px" src="{{landing_object.publishers.0.logo_url}}" alt="{{landing_object.publishers.0.name}}" /></a> -{% endblock %} -{% block signin %} -{% endblock %} -{% block extra_css %} - <link rel="stylesheet" href="/static/bootstrap/bootstrap.min.css" type="text/css" /> - <link rel="stylesheet" href="/static/questionnaire.css" /> - <style type="text/css"> - {% block styleextra %} - {% endblock %} - </style> -{% endblock %} -{% block extra_head %} - {% block headextra %} - {% endblock %} -{% endblock %} - -{% block language %} - {% for lang in LANGUAGES %} - {% if not forloop.first %} | {% endif %} - <a href="/setlang/?lang={{ lang.0 }}&next={{ request.path }}">{{ lang.1 }}</a> - {% endfor %} -{% endblock language %} - -{% block content %} -<div id="main-container"> - <div class="js-main"> - - {% block questionnaire 
%}{% endblock questionnaire %} - </div> -</div> - -{% endblock %} diff --git a/frontend/templates/base.html b/frontend/templates/base.html index 95dc6115e..09d98b713 100644 --- a/frontend/templates/base.html +++ b/frontend/templates/base.html @@ -1,28 +1,20 @@ <!DOCTYPE html> -{% load truncatechars %}{% load sass_tags %} +{% load sass_tags %} -<html lang="en"> +<html> <head> <meta charset="utf-8" /> - <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="referrer" content="origin" /> <title>unglue.it {% block title %}{% endblock %} - {% block extra_meta %}{% endblock %} + {% block extra_css %}{% endblock %} - - - - - - + - - - + {% block extra_js %} {% endblock %} {% if show_langs %} @@ -51,86 +43,84 @@ -
    -
- {% ifequal campaign_list.count 0 %} + {% if not campaign_list.exists %} There aren't any {{ facet_label }}{{ pub_lang|ez_lang_name }} ungluing campaigns right now. If you're an author, publisher, or other rights holder, you can start one. {% else %} {% lazy_paginate 20 campaign_list using "campaign_list" %} @@ -100,7 +101,7 @@ {{ page.number }} {% endfor %}
- {% endifequal %} + {% endif %} {% if request.user.libpref %} diff --git a/frontend/templates/cardform.html b/frontend/templates/cardform.html index 2a2d4afea..8ec2755a7 100644 --- a/frontend/templates/cardform.html +++ b/frontend/templates/cardform.html @@ -26,7 +26,7 @@

{{ action|capfirst }} by Credit Card

-
+
{{ form.as_p }} {% csrf_token %} {{ form.non_field_errors }} diff --git a/frontend/templates/cardscripts.html b/frontend/templates/cardscripts.html index 8b82205f1..399852c09 100644 --- a/frontend/templates/cardscripts.html +++ b/frontend/templates/cardscripts.html @@ -6,6 +6,8 @@ $j(document).ready(function() { // don't let users modify their pledge amount on this page; it's just here for reference // if they modified it here we'd have to faff about with validating premiums + if($j('#id_work_id').prop('value')!=""){ $j('#id_preapproval_amount').prop('disabled', true); + } }); diff --git a/frontend/templates/cc_list.html b/frontend/templates/cc_list.html index bfd4359b8..1fc6e1e0f 100644 --- a/frontend/templates/cc_list.html +++ b/frontend/templates/cc_list.html @@ -14,6 +14,7 @@ {% block extra_head %} + @@ -74,7 +75,7 @@
- {% ifequal work_list.count 0 %} + {% if not work_list.exists %} There aren't any {{ pub_lang|ez_lang_name }} {% if cc.is_cc %}Creative Commons{% endif %} {{ license }} works in this list. Why don't you add some? Use the "More" tab on a work, there are links there for adding ebooks. {% else %} {% lazy_paginate 20 work_list using "work_list" %} @@ -91,7 +92,7 @@ {{ page.number }} {% endfor %}
- {% endifequal %} + {% endif %}
diff --git a/frontend/templates/comments.html b/frontend/templates/comments.html index d9731116a..f1c54b174 100644 --- a/frontend/templates/comments.html +++ b/frontend/templates/comments.html @@ -10,6 +10,7 @@ {% block extra_head %} + {% endblock %} diff --git a/frontend/templates/download.html b/frontend/templates/download.html index 94f95abef..422368685 100644 --- a/frontend/templates/download.html +++ b/frontend/templates/download.html @@ -13,436 +13,395 @@ {% endblock %} -{% block extra_css %} - -{% endblock %} {% block avatar %} -private + private {% endblock %} {% block content %}
-
- - {% if show_beg %} - {% if work.last_campaign.ask_money %} -
- {{ work.last_campaign.description|safe }} -
-
-
-
Say thank you for making {{ work.title }} free.
+
diff --git a/frontend/templates/work.html b/frontend/templates/work.html index 6a40d939b..7b8b856b1 100644 --- a/frontend/templates/work.html +++ b/frontend/templates/work.html @@ -1,11 +1,13 @@ {% extends 'base.html' %} +{% load cache %} {% load comments %} {% load humanize %} {% load lib_acqs %} {% load purchased %} {% load sass_tags %} -{% block title %}— + +{% block title %}— {% if work.is_free %} {{ work.title }} is a Free eBook. {% for fmt in work.formats %}[{{ fmt }}]{% endfor %} {% else %} @@ -13,6 +15,7 @@ {% endif %}{% if action == 'editions' %} – All Editions{% endif %} {% endblock %} {% block extra_meta %} +{% cache 600 head work.id %} @@ -20,228 +23,516 @@ {% for author in work.relators %}{% endfor %} {% if work.first_isbn_13 %}{% endif %} +{% endcache %} {% endblock %} {% block extra_css %} - {% if user.is_staff or user in work.last_campaign.managers.all %} - + {{ kwform.media.css }} + {% endif %} + {% endblock %} -{% block content %} -{% with work.id as work_id %} -
-
- {% if work.uses_google_cover %} - - Find {{ work.title }} at Google Books - - {% else %} - {{ work.title }} - {% endif %} -
-
-
-

{{ work.title }}

-

- - {% if work.authors.count == 2 %} - and - {% endif %}{% if work.relators.count > 2 %}{% for author in work.relators %}{% if not forloop.first %}, {% endif %}{% endfor %} - {% endif %} -

-

- {% if work.last_campaign.publisher %} - {{ work.last_campaign.publisher }} - {% endif %} - - - -

-
-
- {% if status == 'ACTIVE' %} - {% if work.last_campaign.type != 3 %} - {{ work.last_campaign.description|safe }} - {% else %} - {{ work.description|safe }} - {% endif %} - {% elif work.description %} - {{ work.description|safe }} - {% else %} - {{ work.last_campaign.description|safe }} - {% endif %} +{% block extra_js %} + + + + + + +{% if user.is_staff or user in work.last_campaign.managers.all %} + {{ kwform.media.js }} +{% endif %} -
- {% if action == 'display' %} - {% if status == 'ACTIVE' %} - {% if work.last_campaign.type == 1 %} -

A campaign is running to unglue {{work.title}}!

-

The rights holder, {% for claim in work.claim.all %} - {% if claim.status == 'active' %} - {{ claim.rights_holder.rights_holder_name }} - {% endif %} - {% endfor %} - , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) if ungluers can join together to raise ${{ work.last_campaign.target|floatformat:0|intcomma }} by {{ work.last_campaign.deadline }}. - You can help!

- {% endif %} - {% if work.last_campaign.type == 2 %} -

A Buy-to-Unglue Campaign is running to unglue {{work.title}}!

-

The rights holder, {% for claim in work.claim.all %} - {% if claim.status == 'active' %} - {{ claim.rights_holder.rights_holder_name }} - {% endif %} - {% endfor %} - , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) on {{ work.last_campaign.cc_date }}. For every copy that ungluers purchase, that date gets sooner. ${{ work.last_campaign.left|floatformat:0|intcomma }} of sales will unglue the book TODAY. - You can help!

- {% endif %} - {% if work.last_campaign.type == 3 %} -

A Thanks-for-Ungluing Campaign is running to reward the creators of {{work.title}}!

-

The rights holder, {% for claim in work.claim.all %} - {% if claim.status == 'active' %} - {{ claim.rights_holder.rights_holder_name }} - {% endif %} - {% endfor %} - , has released {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) . - You can help us say "Thank You!" so that other creators will do the same.

- {% endif %} -

Campaign details: the fine print

- {{ work.last_campaign.details|safe }} - {% endif %} +{% endblock %} - {% if status == 'SUCCESSFUL' %} -

A campaign has succeeded to unglue {{work.title}}!

-

The rights holder, {% for claim in work.claim.all %} - {% if claim.status == 'active' %} - {{ claim.rights_holder.rights_holder_name }} - {% endif %} - {% endfor %} - , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) thanks to the efforts of ungluers like you.

-

Campaign details: the fine print

- {{ work.last_campaign.details|safe }} - {% endif %} - {% if status != 'ACTIVE' and status != 'SUCCESSFUL' %} -

Rights Information

- {% if claimstatus == 'one_active' %} -

This work has been claimed by {{ rights_holder_name }}.

- {% else %} - {% if claimstatus == 'disputed' %} -

Rights claims are pending.

+{% block topsection %} +{% if work.last_campaign.status == 'ACTIVE' %} + {% if request.user in work.last_campaign.managers.all %} +
Hi, {{ request.user.username }}. Since you're a manager for this campaign, you can edit this campaign.
+ {% endif %} +{% elif not work.user_with_rights %} + {% if request.user.rights_holder.all %} +
Hi, {{ request.user.username }}. Since you're an authorized Unglue.it rights holder, if you own the worldwide electronic rights to this work, you may claim it through the More... tab. Need help? Check out the rights holder tools page.
+ {% endif %} +{% elif request.user == work.user_with_rights %} + {% if work.last_campaign.status != 'SUCCESSFUL' %} +
Hi, {{ request.user.username }}. Since you're a rights holder for this work, you can launch a campaign.
+ {% endif %} +{% endif %} +{% endblock %} + +{% block content %} +{% purchased %} +{% lib_acqs %} +{% with work.last_campaign_status as status %} +{% with work.id as work_id %} +
+
+
+ {% include "explore.html" %} +
+
+
+
+
+ {% cache 600 book-cover work_id %} + {% if work.uses_google_cover %} +
+ + Find {{ work.title }} at Google Books +
{% else %} - {% if claimstatus == 'one_pending' %} -

A claim for this work by {{ rights_holder_name }} is pending.

+
+ {{ work.title }} +
+ {% endif %} + {% endcache %} +
+ {% cache 600 book-detail work_id %} +
+

{{ work.title }}

+
+
+

+ {% if work.authors.count == 2 %} + and + {% endif %}{% if work.relators.count > 2 %}{% for author in work.relators %}{% if not forloop.first %}, {% endif %}{% endfor %} + {% endif %} +

+

+ {% if work.last_campaign.publisher %} + {{ work.last_campaign.publisher }} + {% endif %} + + {{ work.language }} + {{ work.work.age_range }} +

+
+
+
+ {% if status == 'ACTIVE' %} + {% if work.last_campaign.type != 3 %} +
+
+
+ {{ work.percent_of_goal }}% of goal +
+ {% endif %} +
+
+ {% if work.last_campaign.type == 1 %} + ${{ work.last_campaign.current_total|floatformat:0|intcomma }} pledged + {% endif %} + {% if work.last_campaign.type == 2 %} + current ungluing date: + {% endif %} + {% if work.last_campaign.type == 3 %} + ${{ work.last_campaign.current_total|floatformat:0|intcomma }} of thanks from + {% endif %} +
+
+ {% if work.last_campaign.type == 1 %} + ${{ work.last_campaign.target|floatformat:0|intcomma }} goal + {% endif %} + {% if work.last_campaign.type == 2 %} + {{ work.last_campaign.cc_date|date:"M j, Y" }} + After {{ work.last_campaign.cc_date|date:"M j, Y" }} this book will be available for free to anyone, anywhere. Every purchase before then brings that date closer. + {% endif %} + {% if work.last_campaign.type != 3 %} +
+
+ {% endif %} + {% if work.last_campaign.supporters_count == 1 %} + 1 ungluer + {% else %} + {{ work.last_campaign.supporters_count }} ungluers + {% endif %} + {% if work.last_campaign.type == 3 %} +
+ {% if work.last_campaign.anon_count == 1 %} + 1 other + {% else %} + {{ work.last_campaign.anon_count }} others + {% endif %} + {% endif %} +
+ {% if work.last_campaign.type == 2 %} +
+ {% if work.lib_acqs.count == 1 %} + 1 copy in a library + {% else %} + {{ work.lib_acqs.count }} in libraries + {% endif %} +
+ {% endif %} + {% if work.last_campaign.type != 3 %} +
+ {% if work.last_campaign.type == 1 %} + {{ work.last_campaign.countdown }} to go + {% else %} + ${{ work.last_campaign.left|floatformat:0|intcomma }} to go + ${{ work.last_campaign.left|floatformat:0|intcomma }} is the amount it would take to make this ebook free to the world tomorrow. + {% endif %} +
+ {% endif %} +
{% else %} - {% if request.user.rights_holder.all.count %} - Is this work yours? Claim it:

- - - {% csrf_token %} - {{ claimform.user }} - {{ claimform.work }} - {{ claimform.rights_holder }} - -
- {% else %} - Are you the author or publisher of this work? If so, you can claim it as yours by registering as an Unglue.it rights holder. + {% if status == 'SUCCESSFUL' %} +
+ This campaign succeeded on {{ work.last_campaign.success_date|date:"M j, Y" }}. +
+
+
+ {% if work.last_campaign.supporters_count == 1 %} + 1 ungluer + {% else %} + {{ work.last_campaign.supporters_count }} ungluers + {% endif %} +
+
+ ${{ work.last_campaign.current_total|floatformat:0|intcomma }} raised +
+
+ ${{ work.last_campaign.target|floatformat:0|intcomma }} goal +
+
+ Unglued! +
+
{% endif %} + +
+ {% if wishers == 1 %} + 1 Ungluer has + {% else %} + {{ wishers }} Ungluers have + {% endif %} Faved this Work +
{% endif %} - {% endif %} - {% endif %} - {% endif %} - {% if work.is_free %} -

Downloads

-
- This work has been downloaded {{ work.download_count }} times via unglue.it ebook links. -
    - {% for ebook in work.ebooks.all %} -
  1. {{ ebook.download_count }} - {{ ebook.format }} {% if ebook.version_label %} ({{ ebook.version_label }}) {% endif %}({{ ebook.rights }}) at {{ ebook.provider }}.
  2. - {% endfor %} -
-
- {% if user.is_staff %} -

- Feature this work today. -

- {% endif %} - {% endif %} - {% if user.is_staff %} -

Related Works

- - {% endif %} - {% endif %} -
-
-
-
- {% if has_online_book %} - - {% endif %} - - -
-
- {% if request.user.is_anonymous %} -
- {% elif request.user.id in work.last_campaign.supporters %} -
- Faved! + {% get_comment_count for work as comment_count %} + {% if action == 'editions' %} + - {% elif work in request.user.wishlist.works.all %} -
- Remove from My Faves + {% else %} + - {% else %} -
- Add to My Faves + {% endif %} +
+
+
+
+ {% if status == 'ACTIVE' %} + {% if work.last_campaign.type != 3 %} + {{ work.last_campaign.description|safe }} + {% else %} + {{ work.description|safe }} + {% endif %} + {% elif work.description %} + {{ work.description|safe }} + {% else %} + {{ work.last_campaign.description|safe }} + {% endif %} +
+
+ {% for work_rel in work.works_related_to.all %} + {% if work_rel.from_work.language != 'xx' and work.language != 'xx' %} +

+ This work is a {{ work_rel.relation }} of {{ work_rel.from_work }}. +

+ {% endif %} + {% endfor %} + {% for work_rel in work.works_related_from.all %} + {% if work.language != 'xx' and work_rel.to_work.language != 'xx' %} +

+ {{ work_rel.to_work }} is a {{ work_rel.relation }} of this work. +

+ {% endif %} + {% endfor %} + {% if work.doab %} +

+ This book is included in DOAB. +

+ {% endif %} + {% if work.gtbg %} +

+ This book is included in Project Gutenberg. +

+ {% endif %} +
+
+
+
+

Why {% if work.ebooks.all %}read{% else %}unglue{% endif %} this book? Have your say.

+
+ {% render_comment_list for work %} + {% if user.is_authenticated %} + {% render_comment_form for work %} + {% else %} +

You must be logged in to comment.

+ {% endif %} +
+
+
+
+ {% if request.user.is_staff or request.user in work.last_campaign.managers.all %} +
+ {% csrf_token %} + + {% for wish in work.wishes.all reversed %} + {% with wish.wishlist.user as supporter %} +
+ + + Avatar for {{ supporter }} + + +
+ email +
+
+ {{ supporter }}
+ Wished: {{ wish.created }}
+ {% if supporter.id in work.last_campaign.supporters %}Pledged!
{% endif %} + {% if supporter in work.last_campaign.ungluers.all %}Supported!
{% endif %} +
+
+
+ + {% endwith %} + {% endfor %} +
+ {% else %} + {% for wish in work.wishes.all reversed %} + {% with wish.wishlist.user as supporter %} + + {% endwith %} + {% endfor %} + {% endif %} +
+
+
+
+ {% if action == 'display' %} + {% if status == 'ACTIVE' %} + {% if work.last_campaign.type == 1 %} +

A campaign is running to unglue {{work.title}}!

+

The rights holder, {% for claim in work.claim.all %} + {% if claim.status == 'active' %} + {{ claim.rights_holder.rights_holder_name }} + {% endif %} + {% endfor %} + , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) if ungluers can join together to raise ${{ work.last_campaign.target|floatformat:0|intcomma }} by {{ work.last_campaign.deadline }}. + You can help!

+ {% endif %} + {% if work.last_campaign.type == 2 %} +

A Buy-to-Unglue Campaign is running to unglue {{work.title}}!

+

The rights holder, {% for claim in work.claim.all %} + {% if claim.status == 'active' %} + {{ claim.rights_holder.rights_holder_name }} + {% endif %} + {% endfor %} + , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) on {{ work.last_campaign.cc_date }}. For every copy that ungluers purchase, that date gets sooner. ${{ work.last_campaign.left|floatformat:0|intcomma }} of sales will unglue the book TODAY. + You can help!

+ {% endif %} + {% if work.last_campaign.type == 3 %} +

A Thanks-for-Ungluing Campaign is running to reward the creators of {{work.title}}!

+

The rights holder, {% for claim in work.claim.all %} + {% if claim.status == 'active' %} + {{ claim.rights_holder.rights_holder_name }} + {% endif %} + {% endfor %} + , has released {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) . + You can help us say "Thank You!" so that other creators will do the same.

+ {% endif %} +

Campaign details: the fine print

+ {{ work.last_campaign.details|safe }} + {% endif %} + + {% if status == 'SUCCESSFUL' %} +

A campaign has succeeded to unglue {{work.title}}!

+

The rights holder, {% for claim in work.claim.all %} + {% if claim.status == 'active' %} + {{ claim.rights_holder.rights_holder_name }} + {% endif %} + {% endfor %} + , has agreed to release {{work.title}} to the world as a Creative Commons licensed ebook ({{ work.last_campaign.license }}) thanks to the efforts of ungluers like you.

+

Campaign details: the fine print

+ {{ work.last_campaign.details|safe }} + {% endif %} + {% if status != 'ACTIVE' and status != 'SUCCESSFUL' %} +

Rights Information

+ {% if claimstatus == 'one_active' %} +

This work has been claimed by {{ rights_holder_name }}.

+ {% else %} + {% if claimstatus == 'disputed' %} +

Rights claims are pending.

+ {% else %} + {% if claimstatus == 'one_pending' %} +

A claim for this work by {{ rights_holder_name }} is pending.

+ {% else %} + {% if request.user.rights_holder.all.count %} + Is this work yours? Claim it:

+ +
+ {% csrf_token %} + {{ claimform.user }} + {{ claimform.work }} + {{ claimform.rights_holder }} + +

+ {% else %} + Are you the author or publisher of this work? If so, you can claim it as yours by registering as an Unglue.it rights holder. + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if work.is_free %} +

Downloads

+
+ This work has been downloaded {{ work.download_count }} times via unglue.it ebook links. +
    + {% for ebook in work.ebooks.all %} +
  1. {{ ebook.download_count }} - {{ ebook.format }} {% if ebook.version_label %} ({{ ebook.version_label }}) {% endif %}({{ ebook.rights }}) at {{ ebook.provider }}.
  2. + {% endfor %} +
+
+ {% if user.is_staff %} +

+ Feature this work today. +

+ {% endif %} + {% endif %} + {% if user.is_staff %} +

Related Works

+ + + {% endif %} +

Keywords

+ {% if work.subjects.all.count > 0 %} +
    + {% for subject in work.subjects.all %} +
  • {{ subject.name }} + {% if user.is_staff or user in work.last_campaign.managers.all %} + x + {% endif %} +
  • + {% endfor %} +
+ {% else %} + No keywords yet. +
    + + {% endif %} + {% if user_can_edit_work %} +
    {% csrf_token %} + {{ kwform.add_kw }} +
    + {% endif %} + {% endif %} + {% with doi=work.doi http_id=work.http_id %} + {% if doi or http_id %} +

    Links

    + {% if doi %} + DOI: {{ doi }}
    + {% endif %} + {% if http_id %} + web: {{ http_id }}
    + {% endif %} + {% endif %} + {% endwith %} +

    Editions

    + {% if alert %} +

    {{ alert }}
    + {% endif %} + {% if user_can_edit_work %} + + {% endif %} + + {% if action == 'editions' %} + {% include 'split.html' %} + {% else %} + {% with work.preferred_edition as edition %} + {% include 'edition_display.html' %} + {% endwith %} + {% if not campaign %} + {% for edition in editions %} + {% if edition != work.preferred_edition %} + {% include 'edition_display.html' %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} + +
    +
    - {% endif %} +
    - -
    - - Find on LibraryThing - LibraryThing - +
    + {% include 'work_action.html' %}
    -
    - {% include "explore.html" %} -
    -
    -
    -

    Comments

    - {% render_comment_list for work %} - {% if user.is_authenticated %} - {% render_comment_form for work %} - {% else %} -

    You must be logged in to comment.

    - {% endif %}
    {% endwith %} +{% endwith %} {% endblock %} diff --git a/frontend/templates/work_action.html b/frontend/templates/work_action.html index 2523b0a2e..2f53beb0b 100644 --- a/frontend/templates/work_action.html +++ b/frontend/templates/work_action.html @@ -47,12 +47,12 @@

    Share

    {% block userblock2 %} - With your help we're raising money to give these {% if pub_lang %}{{pub_lang|ez_lang_name}} language {% endif %}books to the world. + These {% if pub_lang %}{{pub_lang|ez_lang_name}} language {% endif %}books are Free! {% endblock %}
    @@ -60,12 +60,7 @@
    -
    - +
    • View As:
    • @@ -79,9 +74,15 @@
    + +
    - {% ifequal work_list.count 0 %} + {% if not work_list.exists %} {% block noworks %} There aren't any {{ pub_lang|ez_lang_name }} works in this list yet. {% block add_more %}Why not add your favorite books to your list, so we can feature them here?{% endblock %} {% endblock %} @@ -89,47 +90,18 @@ {% lazy_paginate 20 works_unglued using "works_unglued" %} {% for work in works_unglued %}
    - {% with googlebooks_id=work.googlebooks_id tab_override='tabs-1' %} + {% with googlebooks_id=work.googlebooks_id %} {% include "book_panel.html" %} {% endwith %}
    {% endfor %} -
    {% if request.user.libpref %} diff --git a/frontend/templates/worksummary.html b/frontend/templates/worksummary.html index ecfdf11e2..4bf96d83b 100644 --- a/frontend/templates/worksummary.html +++ b/frontend/templates/worksummary.html @@ -36,8 +36,8 @@

    {{ work.last_campaign.publisher }} {% endif %} - - + {{ work.language }} + {{ work.work.age_range }}

    diff --git a/frontend/templatetags/bookpanel.py b/frontend/templatetags/bookpanel.py index 814bd65dd..88516230f 100644 --- a/frontend/templatetags/bookpanel.py +++ b/frontend/templatetags/bookpanel.py @@ -19,14 +19,14 @@ def bookpanel(context): supported = False if campaign and campaign.type == REWARDS: if campaign.status == 'ACTIVE': - if not user.is_anonymous() and user.transaction_set.filter(campaign__work=work): + if not user.is_anonymous and user.transaction_set.filter(campaign__work=work): supported = True context['supported'] = supported show_pledge = False if campaign and campaign.type == REWARDS: if campaign.status == 'ACTIVE': - if user.is_anonymous() or not supported: + if user.is_anonymous or not supported: show_pledge = True context['show_pledge'] = show_pledge @@ -39,7 +39,7 @@ def bookpanel(context): show_purchase = False if campaign and campaign.type == BUY2UNGLUE: - if user.is_anonymous() or not context.get('license_is_active', False): + if user.is_anonymous or not context.get('license_is_active', False): if campaign.status == 'ACTIVE': if not context.get('borrowable', False): if not library: diff --git a/frontend/templatetags/lib_acqs.py b/frontend/templatetags/lib_acqs.py index caa0ddc20..e6b28e23f 100644 --- a/frontend/templatetags/lib_acqs.py +++ b/frontend/templatetags/lib_acqs.py @@ -10,7 +10,7 @@ def lib_acqs(context): lib_user = library.user else: user = context['request'].user - if user.is_anonymous(): + if user.is_anonymous: return '' else: lib_user = (lib.user for lib in user.profile.libraries) diff --git a/frontend/templatetags/purchased.py b/frontend/templatetags/purchased.py index 8a6223c55..eb2e446b6 100644 --- a/frontend/templatetags/purchased.py +++ b/frontend/templatetags/purchased.py @@ -10,7 +10,7 @@ def purchased(context): try: work.id # sometimes work is a dict user = context['request'].user - if user.is_anonymous(): + if user.is_anonymous: return '' try: user_license = work.get_user_license(user) @@ -22,8 +22,8 @@ def 
purchased(context): context['borrowable'] = None context['in_library'] = None holds = user.holds.filter(work=work) - if holds.count(): - context['on_hold'] = holds[0] + if holds.exists(): + context['on_hold'] = holds.first() if user_license: context['purchased'] = user_license.purchased context['borrowed'] = user_license.borrowed diff --git a/frontend/templatetags/truncatechars.py b/frontend/templatetags/truncatechars.py deleted file mode 100644 index 2754ebf75..000000000 --- a/frontend/templatetags/truncatechars.py +++ /dev/null @@ -1,186 +0,0 @@ -""" -The truncatechars filter is part of Django dev, but we're on 1.3.1 -The following is the filter and its dependencies -To use this filter, put "{% load truncatechars %}" at the beginning of your template, -then {{ myvariable|truncatechars:num }} -""" -import unicodedata - -from django import template -from django.template import Library -from django.template.defaultfilters import stringfilter -from django.utils.encoding import force_unicode -from django.utils.functional import allow_lazy, SimpleLazyObject -from django.utils.translation import pgettext - -register = Library() - -class Truncator(SimpleLazyObject): - """ - An object used to truncate text, either by characters or words. - """ - def __init__(self, text): - super(Truncator, self).__init__(lambda: force_unicode(text)) - - def add_truncation_text(self, text, truncate=None): - if truncate is None: - truncate = pgettext( - 'String to return when truncating text', - u'%(truncated_text)s...') - truncate = force_unicode(truncate) - if '%(truncated_text)s' in truncate: - return truncate % {'truncated_text': text} - # The truncation text didn't contain the %(truncated_text)s string - # replacement argument so just append it to the text. - if text.endswith(truncate): - # But don't append the truncation text if the current text already - # ends in this. 
- return text - return '%s%s' % (text, truncate) - - def chars(self, num, truncate=None): - """ - Returns the text truncated to be no longer than the specified number - of characters. - - Takes an optional argument of what should be used to notify that the - string has been truncated, defaulting to a translatable string of an - ellipsis (...). - """ - length = int(num) - uniself = unicode(self._wrapped) - text = unicodedata.normalize('NFC', uniself) - - # Calculate the length to truncate to (max length - end_text length) - truncate_len = length - for char in self.add_truncation_text('', truncate): - if not unicodedata.combining(char): - truncate_len -= 1 - if truncate_len == 0: - break - - s_len = 0 - end_index = None - for i, char in enumerate(text): - if unicodedata.combining(char): - # Don't consider combining characters - # as adding to the string length - continue - s_len += 1 - if end_index is None and s_len > truncate_len: - end_index = i - if s_len > length: - # Return the truncated string - return self.add_truncation_text(text[:end_index or 0], - truncate) - - # Return the original string since no truncation was necessary - return text - chars = allow_lazy(chars) - - def words(self, num, truncate=None, html=False): - """ - Truncates a string after a certain number of words. Takes an optional - argument of what should be used to notify that the string has been - truncated, defaulting to ellipsis (...). - """ - length = int(num) - if html: - return self._html_words(length, truncate) - return self._text_words(length, truncate) - words = allow_lazy(words) - - def _text_words(self, length, truncate): - """ - Truncates a string after a certain number of words. - - Newlines in the string will be stripped. 
- """ - words = self._wrapped.split() - if len(words) > length: - words = words[:length] - return self.add_truncation_text(u' '.join(words), truncate) - return u' '.join(words) - - def _html_words(self, length, truncate): - """ - Truncates HTML to a certain number of words (not counting tags and - comments). Closes opened tags if they were correctly closed in the - given HTML. - - Newlines in the HTML are preserved. - """ - if length <= 0: - return u'' - html4_singlets = ( - 'br', 'col', 'link', 'base', 'img', - 'param', 'area', 'hr', 'input' - ) - # Count non-HTML words and keep note of open tags - pos = 0 - end_text_pos = 0 - words = 0 - open_tags = [] - while words <= length: - m = re_words.search(self._wrapped, pos) - if not m: - # Checked through whole string - break - pos = m.end(0) - if m.group(1): - # It's an actual non-HTML word - words += 1 - if words == length: - end_text_pos = pos - continue - # Check for tag - tag = re_tag.match(m.group(0)) - if not tag or end_text_pos: - # Don't worry about non tags or tags after our truncate point - continue - closing_tag, tagname, self_closing = tag.groups() - # Element names are always case-insensitive - tagname = tagname.lower() - if self_closing or tagname in html4_singlets: - pass - elif closing_tag: - # Check for match in open tags list - try: - i = open_tags.index(tagname) - except ValueError: - pass - else: - # SGML: An end tag closes, back to the matching start tag, - # all unclosed intervening start tags with omitted end tags - open_tags = open_tags[i + 1:] - else: - # Add it to the start of the open tags list - open_tags.insert(0, tagname) - if words <= length: - # Don't try to close tags if we don't need to truncate - return self._wrapped - out = self._wrapped[:end_text_pos] - truncate_text = self.add_truncation_text('', truncate) - if truncate_text: - out += truncate_text - # Close any tags still open - for tag in open_tags: - out += '' % tag - # Return string - return out - -# django dev uses 
filter(is_safe=True) syntax here, but that's not yet available in 1.3.1 -@register.filter() -@stringfilter -def truncatechars(value, arg): - """ - Truncates a string after a certain number of characters. - - Argument: Number of characters to truncate after. - """ - try: - length = int(arg) - except ValueError: # Invalid literal for int(). - return value # Fail silently. - return Truncator(value).chars(length) -truncatechars.is_safe = True \ No newline at end of file diff --git a/frontend/templatetags/urldecode.py b/frontend/templatetags/urldecode.py index 94d87c6e2..500227b5a 100644 --- a/frontend/templatetags/urldecode.py +++ b/frontend/templatetags/urldecode.py @@ -1,7 +1,7 @@ """ {{ raw|urldecode }} """ -from urllib import unquote +from urllib.parse import unquote from django.template import Library from django.template.defaultfilters import stringfilter @@ -12,4 +12,4 @@ @stringfilter def urldecode(value): return unquote(value) - + diff --git a/frontend/tests.py b/frontend/tests.py index c182e51b5..faa72acb9 100755 --- a/frontend/tests.py +++ b/frontend/tests.py @@ -7,10 +7,11 @@ from decimal import Decimal as D #django imports +from django.conf import settings from django.contrib import auth from django.contrib.auth.models import User from django.core import mail -from django.core.urlresolvers import reverse +from django.urls import reverse from django.test import TestCase from django.test.client import Client from django.utils.timezone import now @@ -36,7 +37,7 @@ def test_add_remove(self): HTTP_X_REQUESTED_WITH="XMLHttpRequest") self.assertEqual(r.status_code, 200) self.assertEqual(self.user.wishlist.works.all().count(), 1) - wished = self.user.wishlist.works.all()[0] + wished = self.user.wishlist.works.first() # test the work page r = self.client.get("/work/%s/" % wished.id) self.assertEqual(r.status_code, 200) @@ -65,7 +66,7 @@ def test_anonymous(self): r = anon_client.get("/work/{}/".format(self.work.id)) r = 
anon_client.head("/work/{}/".format(self.work.id)) self.assertEqual(r.status_code, 200) - csrfmatch = re.search("name='csrfmiddlewaretoken' value='([^']*)'", r.content) + csrfmatch = re.search("name='csrfmiddlewaretoken' value='([^']*)'", str(r.content, 'utf-8')) self.assertFalse(csrfmatch) r = anon_client.post("/work/{}/kw/".format(self.work.id)) self.assertEqual(r.status_code, 302) @@ -73,27 +74,27 @@ def test_anonymous(self): def can_edit(self, client, can=True): r = client.get("/work/{}/".format(self.work.id)) self.assertEqual(r.status_code, 200) - csrfmatch = re.search("name='csrfmiddlewaretoken' value='([^']*)'", r.content) + csrfmatch = re.search("name='csrfmiddlewaretoken' value='([^']*)'", str(r.content, 'utf-8')) self.assertTrue(csrfmatch) csrf = csrfmatch.group(1) r = client.post("/work/{}/kw/".format(self.work.id), { - u'csrfmiddlewaretoken': csrf, - u'kw_add':u'true', - u'add_kw_0':u'Fiction', - u'add_kw_1':self.kw.id + 'csrfmiddlewaretoken': csrf, + 'kw_add':'true', + 'add_kw_0':'Fiction', + 'add_kw_1':self.kw.id }) if can: - self.assertEqual(r.content, u'Fiction') + self.assertEqual(r.content, b'Fiction') else: - self.assertEqual(r.content, u'true') + self.assertEqual(r.content, b'true') r = client.post("/work/{}/kw/".format(self.work.id), { - u'csrfmiddlewaretoken': csrf, - u'remove_kw' : u'Fiction' + 'csrfmiddlewaretoken': csrf, + 'remove_kw' : 'Fiction' }) if can: - self.assertEqual(r.content, u'removed Fiction') + self.assertEqual(r.content, b'removed Fiction') else: - self.assertEqual(r.content, u'False') + self.assertEqual(r.content, b'False') def test_user(self): # test non-RightsHolder @@ -220,7 +221,7 @@ def setUp(self): #self.assertEqual(self.client.session['_auth_user_id'], self.user.pk) user = auth.get_user(self.client) - assert user.is_authenticated() + assert user.is_authenticated # load a Work by putting it on the User's wishlist @@ -228,7 +229,7 @@ def setUp(self): HTTP_X_REQUESTED_WITH="XMLHttpRequest") 
self.assertEqual(r.status_code, 200) self.assertEqual(self.user.wishlist.works.all().count(), 1) - wished = self.user.wishlist.works.all()[0] + wished = self.user.wishlist.works.first() # test the work page r = self.client.get("/work/%s/" % wished.id) self.assertEqual(r.status_code, 200) @@ -237,7 +238,7 @@ def setUp(self): self.assertEqual(r.status_code, 200) # load a Work and a Campaign to create a Pledge page - self.work = self.user.wishlist.works.all()[0] + self.work = self.user.wishlist.works.first() self.campaign = Campaign(target=D('1000.00'), deadline=now() + timedelta(days=180), work=self.work, description='dummy description') self.campaign.save() @@ -289,6 +290,11 @@ def verify_setup(self): # how many works and campaigns? self.assertEqual(Work.objects.count(), 3) self.assertEqual(Campaign.objects.count(), 2) + + # disable notification queuing + from notification import models + models.QUEUE_ALL = False + def do_test_junk_webhook(self): """send in junk json and then an event that doesn't exist""" @@ -348,12 +354,12 @@ def pledge_to_work_with_cc(self, username, password, work_id, card, preapproval_ transaction = Transaction.objects.get(id=t_id) # catch any exception and pass it along + charge_exception = None try: self.assertTrue(pm.execute_transaction(transaction, ())) - except Exception, charge_exception: - pass - else: - charge_exception = None + except Exception as e: + charge_exception = e + # retrieve events from this period events = list(sc._all_objs('Event', created={'gte': time0})) @@ -363,7 +369,7 @@ def pledge_to_work_with_cc(self, username, password, work_id, card, preapproval_ def good_cc_scenario(self): # how much of test.campaigntest.test_relaunch can be done here? 
- card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2020', cvc='123', name='Raymond Yee', + card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2030', cvc='123', name='Raymond Yee', address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) # good card (events, charge_exception) = self.pledge_to_work_with_cc(username="RaymondYee", password="Test_Password_", work_id=1, card=card1, @@ -373,11 +379,10 @@ def good_cc_scenario(self): # expect to have 3 events (there is a possibility that someone else could be running tests on this stripe account at the same time) # events returned sorted in reverse chronological order. - - self.assertEqual(len(events), 3) - self.assertEqual(events[0].type, 'charge.succeeded') - self.assertEqual(events[1].type, 'customer.card.created') - self.assertEqual(events[2].type, 'customer.created') + evt_types = [event.type for event in events] + self.assertTrue('charge.succeeded' in evt_types) + self.assertTrue('customer.card.created' in evt_types) + self.assertTrue('customer.created' in evt_types) # now feed each of the events to the IPN processor. ipn_url = reverse("HandleIPN", args=('stripelib',)) @@ -406,10 +411,10 @@ def bad_cc_scenario(self): # expect to have 3 events (there is a possibility that someone else could be running tests on this stripe account at the same time) # events returned sorted in reverse chronological order. - self.assertEqual(len(events), 3) - self.assertEqual(events[0].type, 'charge.failed') - self.assertEqual(events[1].type, 'customer.card.created') - self.assertEqual(events[2].type, 'customer.created') + evt_types = [event.type for event in events] + self.assertTrue('charge.failed' in evt_types) + self.assertTrue('customer.card.created' in evt_types) + self.assertTrue('customer.created' in evt_types) # now feed each of the events to the IPN processor. 
ipn_url = reverse("HandleIPN", args=('stripelib',)) @@ -429,7 +434,7 @@ def recharge_with_new_card(self): c.save() # set up a good card - card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2020', cvc='123', name='dataunbound', + card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2030', cvc='123', name='dataunbound', address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) # good card sc = StripeClient() @@ -453,7 +458,7 @@ def recharge_with_new_card(self): self.assertEqual(r.status_code, 200) # a charge should now go through - self.assertEqual(len(Notice.objects.filter(notice_type__label='pledge_charged', recipient__username='dataunbound')), 1) + self.assertTrue(Notice.objects.filter(notice_type__label='pledge_charged', recipient__username='dataunbound').exists()) def test_good_bad_cc_scenarios(self): @@ -466,12 +471,12 @@ def test_good_bad_cc_scenarios(self): self.assertEqual(len(mail.outbox), 9) # expect these 6 notices : - # u'pledge_charged', , - # u'pledge_failed', , - # u'new_wisher', , - # u'pledge_you_have_pledged', , - # u'pledge_charged', , - # u'pledge_you_have_pledged', , + # 'pledge_charged', , + # 'pledge_failed', , + # 'new_wisher', , + # 'pledge_you_have_pledged', , + # 'pledge_charged', , + # 'pledge_you_have_pledged', , # plus two customer creation emails def stripe_token_none(self): @@ -512,3 +517,7 @@ def stripe_token_none(self): r = self.client.post(pledge_fund_path, data={'stripe_token':stripe_token}, follow=True) self.assertEqual(r.status_code, 200) + def tearDown(self): + from notification import models + models.QUEUE_ALL = settings.NOTIFICATION_QUEUE_ALL + diff --git a/frontend/urls.py b/frontend/urls.py index ec583cb12..8de5a43f5 100644 --- a/frontend/urls.py +++ b/frontend/urls.py @@ -1,5 +1,5 @@ from django.conf import settings -from django.conf.urls import patterns, url, include +from django.conf.urls import url, include from 
django.contrib.admin.views.decorators import staff_member_required from django.contrib.auth.decorators import login_required from django.contrib.sites.models import Site @@ -31,15 +31,9 @@ url(r"^rightsholders/agree/submitted$", TemplateView.as_view(template_name='agreed.html'), name="agreed"), url(r"^rightsholders/campaign/(?P\d+)/$", views.manage_campaign, name="manage_campaign"), url(r"^rightsholders/campaign/(?P\d+)/results/$", views.manage_campaign, {'action': 'results'}, name="campaign_results"), - url(r"^rightsholders/campaign/(?P\d+)/(?P\d+)/makemobi/$", views.manage_campaign, {'action': 'makemobi'}, name="makemobi"), - url(r"^rightsholders/campaign/(?P\d+)/mademobi/$", views.manage_campaign, {'action': 'mademobi'}, name="mademobi"), url(r"^rightsholders/edition/(?P\d*)/(?P\d*)$", views.edit_edition, {'by': 'rh'}, name="rh_edition"), url(r"^rightsholders/edition/(?P\d*)/upload/$", views.edition_uploads, name="edition_uploads"), url(r"^rightsholders/claim/$", login_required(views.claim), name="claim"), - url(r"^rightsholders/surveys/$", views.surveys, name="surveys"), - url(r"^rightsholders/new_survey/(?P\d*)/?$", views.new_survey, name="new_survey"), - url(r"^rightsholders/surveys/answers_(?P\d+)_(?P\d*).csv$", views.export_surveys, name="survey_answers"), - url(r"^rightsholders/surveys/summary_(?P\d+)_(?P\d*).csv$", views.surveys_summary, name="survey_summary"), url(r"^rh_admin/$", views.rh_admin, name="rh_admin"), url(r"^rh_admin/accepted/$", views.rh_admin, {'facet': 'accepted'}, name="accepted"), url(r"^campaign_admin/$", views.campaign_admin, name="campaign_admin"), @@ -51,6 +45,7 @@ url(r"^campaigns/(?P\w*)$", views.CampaignListView.as_view(), name='campaign_list'), url(r"^campaigns/(?P\w*)/marc/$", views.CampaignListView.as_view(send_marc=True), name='campaign_list_marc'), url(r"^lists/(?P\w*)$", views.WorkListView.as_view(), name='work_list'), + url(r"^lists/(?P\w*)(?P)$", views.WorkListView.as_view(), name='work_list_nopub'), 
url(r"^lists/(?P\w*)/marc/$", views.WorkListView.as_view(send_marc=True), name='work_list_marc'), url(r"^free/(?P.*)/marc/$", views.FacetedView.as_view(send_marc=True), name='faceted_list_marc'), url(r"^free/(?P.*)/$", views.FacetedView.as_view(), name='faceted_list'), @@ -65,11 +60,6 @@ url(r"^creativecommons/$", views.FacetedView.as_view(), name='cc_list'), url(r"^creativecommons/(?P[^\s]*)/marc/$", views.FacetedView.as_view(send_marc=True), name='cc_list_marc'), url(r"^creativecommons/(?P[^\s]*)$", views.FacetedView.as_view(), name='cc_list_detail'), - url(r"^goodreads/auth/$", views.goodreads_auth, name="goodreads_auth"), - url(r"^goodreads/auth_cb/$", views.goodreads_cb, name="goodreads_cb"), - url(r"^goodreads/flush/$", views.goodreads_flush_assoc, name="goodreads_flush_assoc"), - url(r"^goodreads/load_shelf/$", views.goodreads_load_shelf, name="goodreads_load_shelf"), - url(r"^goodreads/shelves/$", views.goodreads_calc_shelves, name="goodreads_calc_shelves"), url(r"^stub/", views.stub, name="stub"), url(r"^work/(?P\d+)/$", views.work, name="work"), url(r"^work/(?P\d+)/preview/$", views.work, {'action': 'preview'}, name="work_preview"), @@ -88,9 +78,7 @@ url(r"^work/(?P\d+)/editions/$", views.work,{'action': 'editions'}, name="work_editions"), url(r"^work/\d+/acks/images/(?P[\w\.]*)$", views.static_redirect_view,{'dir': 'images'}), url(r"^work/(?P\d+)/librarything/$", views.work_librarything, name="work_librarything"), - url(r"^work/(?P\d+)/goodreads/$", views.work_goodreads, name="work_goodreads"), url(r"^work/(?P\d+)/openlibrary/$", views.work_openlibrary, name="work_openlibrary"), - url(r"^read/(?P\d+)/$", views.read, name="read"), url(r"^new_edition/(?P)(?P)$", views.edit_edition, name="new_edition"), url(r"^new_edition/(?P\d*)/(?P\d*)$", views.edit_edition, name="new_edition"), url(r"^manage_ebooks/(?P\d*)$", views.manage_ebooks, name="manage_ebooks"), @@ -106,7 +94,7 @@ url(r"^fund/complete/$", views.FundCompleteView.as_view(), name="pledge_complete"), 
url(r"^pledge/modified/$", login_required(views.PledgeModifiedView.as_view()), name="pledge_modified"), url(r"^pledge/modify/(?P\d+)$", login_required(views.PledgeView.as_view()), name="pledge_modify"), - url(r"^payment/donation/new$", csrf_exempt(views.NewDonationView.as_view()), name="newdonation" ), + url(r"^payment/donation/create$", csrf_exempt(views.NewDonationView.as_view()), name="newdonation" ), url(r"^payment/fund/(?P\d+)$", views.FundView.as_view(), name="fund" ), url(r"^pledge/recharge/(?P\d+)$", login_required(views.PledgeRechargeView.as_view()), name="pledge_recharge"), url(r"^purchase/(?P\d+)/$", login_required(views.PurchaseView.as_view(),login_url='/accounts/login/purchase/'), name="purchase"), @@ -118,12 +106,15 @@ url('^404testing/$', TemplateView.as_view(template_name='404.html') ), url('^500testing/$', TemplateView.as_view(template_name='500.html')), url('^robots.txt$', TemplateView.as_view(template_name='robots.txt',content_type='text/plain')), + url(r'favicon.ico$', views.static_redirect_view, {'file_name': 'favicon.ico', 'dir': 'images'}, name="favicon"), url(r"^emailshare/(?P\w*)/?$", views.emailshare, name="emailshare"), url(r"^feedback/campaign/(?P\d+)/?$", views.ask_rh, name="ask_rh"), url(r"^feedback/$", views.feedback, name="feedback"), url(r"^feedback/thanks/$", TemplateView.as_view(template_name="thanks.html")), url(r"^about/$", TemplateView.as_view(template_name="about_main.html"), name="about"), + url(r"^about/funds/$", TemplateView.as_view(template_name="about_funds.html"), + name="about_funds"), url(r"^comments/$", views.comment, name="comment"), url(r"^info/(?P[\w\.]*)$", views.InfoPageView.as_view()), url(r"^info/languages/(?P[\w\.]*)$", views.InfoLangView.as_view()), @@ -149,7 +140,5 @@ if settings.DEBUG: urlpatterns += [ - url(r"^goodreads/$", login_required(views.GoodreadsDisplayView.as_view()), name="goodreads_display"), - url(r"^goodreads/clear_wishlist/$", views.clear_wishlist, name="clear_wishlist"), 
url(r"^celery/clear/$", views.clear_celery_tasks, name="clear_celery_tasks"), -] +] \ No newline at end of file diff --git a/frontend/views/__init__.py b/frontend/views/__init__.py index fef8080db..cb547faaf 100755 --- a/frontend/views/__init__.py +++ b/frontend/views/__init__.py @@ -1,14 +1,15 @@ ''' external library imports ''' +import functools import re import sys import json import logging -import urllib +from urllib.parse import unquote import requests -from datetime import timedelta, date +from datetime import timedelta, date, datetime from decimal import Decimal as D from itertools import chain from notification import models as notification @@ -29,9 +30,10 @@ from django.core.exceptions import ValidationError from django.core.files.storage import default_storage from django.core.mail import EmailMessage -from django.core.urlresolvers import reverse, reverse_lazy +from django.urls import reverse, reverse_lazy from django.core.validators import validate_email from django.db.models import Q, Count, Sum +from django.db.utils import IntegrityError from django.forms import Select from django.forms.models import inlineformset_factory from django.http import ( @@ -64,8 +66,7 @@ userlists, ) import regluit.core.cc as cc -from regluit.core.bookloader import merge_works, detach_edition -from regluit.core.goodreads import GoodreadsClient +from regluit.core.bookloader import merge_works, detach_editions from regluit.core.isbn import ISBN from regluit.core.search import gluejar_search from regluit.core.signals import supporter_message @@ -78,7 +79,6 @@ CampaignPledgeForm, CampaignPurchaseForm, CampaignThanksForm, - GoodreadsShelfLoadingForm, RightsHolderForm, UserClaimForm, LibraryThingForm, @@ -104,7 +104,6 @@ RegiftForm, SubjectSelectForm, MapSubjectForm, - SurveyForm, DonationForm, ) @@ -129,14 +128,23 @@ from regluit.libraryauth.models import Library from regluit.marc.views import qs_marc_records from regluit.utils.localdatetime import date_today -from 
questionnaire.models import Landing, Questionnaire -from questionnaire.views import export_summary as answer_summary, export_csv as export_answers from .bibedit import edit_edition, user_can_edit_work, safe_get_work, get_edition from .rh_views import campaign_results, claim, manage_campaign, rh_admin, RHAgree, rh_tools logger = logging.getLogger(__name__) +def log_time(method): + @functools.wraps(method) + def wrapper(*args, **kwargs): + start_time = datetime.now() + page = method(*args, **kwargs) + end_time = datetime.now() + logging.debug('returned %s. Total time: %s' % (method, (end_time - start_time))) + return page + return wrapper + + def static_redirect_view(request, file_name, dir=""): return HttpResponseRedirect('/static/'+dir+"/"+file_name) @@ -171,14 +179,14 @@ def process_kindle_email(request): download + login/account creation; add kindle email to profile """ user = request.user - if user.is_authenticated() and request.session.has_key('kindle_email'): + if user.is_authenticated and 'kindle_email' in request.session: user.profile.kindle_email = request.session['kindle_email'] user.profile.save() request.session.pop('kindle_email') def next(request): - if request.COOKIES.has_key('next'): - response = HttpResponseRedirect(urllib.unquote(urllib.unquote(request.COOKIES['next']))) + if 'next' in request.COOKIES: + response = HttpResponseRedirect(unquote(unquote(request.COOKIES['next']))) response.delete_cookie('next') return response else: @@ -192,9 +200,10 @@ def cover_width(work): return cover_width +@log_time def home(request, landing=False): faves = None - if request.user.is_authenticated() : + if request.user.is_authenticated : next = request.GET.get('next', False) if next: # should happen only for new users @@ -210,7 +219,7 @@ def home(request, landing=False): featured = models.Work.objects.filter(featured__isnull=False).distinct().order_by('-featured')[0] except: #shouldn't occur except in tests - featured = models.Work.objects.all()[0] + featured = 
models.Work.objects.first() top_pledge = models.Campaign.objects.filter(status="ACTIVE", type=REWARDS).order_by('left')[:4] top_b2u = models.Campaign.objects.filter(status="ACTIVE", type=BUY2UNGLUE).order_by('-work__num_wishes')[:4] top_t4u = models.Campaign.objects.exclude(id = featured.id).filter(status="ACTIVE", type=THANKS).order_by('-work__num_wishes')[:4] @@ -270,7 +279,7 @@ def home(request, landing=False): reverse=True ) - if request.user.is_authenticated(): + if request.user.is_authenticated: events = latest_actions[:12] else: events = latest_actions[:6] @@ -298,18 +307,7 @@ def stub(request): def acks(request, work): return render(request, 'front_matter.html', {'campaign': work.last_campaign()}) -def read(request, work_id): - work = safe_get_work(work_id) - try: - ebook_id = work.first_epub().id - url = get_object_or_404(models.Ebook, id=ebook_id).url - except (ValueError, AttributeError): - raise Http404 - return render(request, 'read.html', { - 'work': work, - 'url': url, - }) - +@log_time def work(request, work_id, action='display'): work = safe_get_work(work_id) alert = '' @@ -323,10 +321,11 @@ def work(request, work_id, action='display'): if request.method == "POST" and (request.user.is_staff or (work.last_campaign() and request.user in work.last_campaign().managers.all())): formset = EditionFormSet(data=request.POST, instance=work) if formset.is_valid(): - for form in formset.deleted_forms: - detach_edition(form.instance) + to_split = [form.instance for form in formset.deleted_forms] + if to_split: + detach_editions(to_split) alert = 'editions have been split' - if request.POST.has_key('select_edition'): + if 'select_edition' in request.POST: selected_id = request.POST['select_edition'] try: work.selected_edition = work.editions.get(id=selected_id) @@ -338,7 +337,7 @@ def work(request, work_id, action='display'): formset = EditionFormSet(instance=work) # process waiting add request - if not request.user.is_anonymous() and 
request.session.has_key("add_wishlist"): + if not request.user.is_anonymous and "add_wishlist" in request.session: add_url = request.session["add_wishlist"] if add_url == request.path: request.user.wishlist.add_work(work, "login", notify=True) @@ -346,7 +345,7 @@ def work(request, work_id, action='display'): process_kindle_email(request) - if request.method == 'POST' and not request.user.is_anonymous(): + if request.method == 'POST' and not request.user.is_anonymous: activetab = '4' elif action == 'editions': activetab = '4' @@ -375,7 +374,7 @@ def work(request, work_id, action='display'): if action == 'preview': work.last_campaign_status = 'ACTIVE' - if not request.user.is_anonymous(): + if not request.user.is_anonymous: claimform = UserClaimForm(request.user, initial={'work':work.pk, 'user': request.user.id}, prefix = 'claim') else: claimform = None @@ -423,13 +422,12 @@ def work(request, work_id, action='display'): 'cover_width': cover_width_number, 'action': action, 'formset': formset, - 'kwform': SubjectSelectForm(), - 'has_online_book': work.first_epub() != None, + 'kwform': SubjectSelectForm() }) def edition_uploads(request, edition_id): context = {} - if not request.user.is_authenticated() : + if not request.user.is_authenticated: return render(request, "admins_only.html") edition = get_edition(edition_id) campaign_type = edition.work.last_campaign().type @@ -465,7 +463,7 @@ def edition_uploads(request, edition_id): context['upload_error'] = e form.instance.delete() else: - tasks.process_ebfs.delay(edition.work.last_campaign()) + tasks.process_ebfs.delay(edition.work.last_campaign().id) if form.instance.id: new_ebook = models.Ebook.objects.create( edition=edition, @@ -507,18 +505,19 @@ def manage_ebooks(request, edition_id, by=None): ebook_form = EbookForm(data = request.POST, files=request.FILES,) if ebook_form.is_valid(): if ebook_form.cleaned_data.get('file', None): + file=ebook_form.cleaned_data['file'] new_ebf = models.EbookFile.objects.create( - 
file=ebook_form.cleaned_data['file'], + file=file, format=ebook_form.cleaned_data['format'], edition=edition, - active=True, - ) ebook_form.instance.url = new_ebf.file.url ebook_form.instance.provider = "Unglue.it" ebook_form.instance.save() new_ebf.ebook = ebook_form.instance new_ebf.save() + new_ebf.ebook.filesize = new_ebf.file.size + new_ebf.ebook.save() else: ebook_form.save() ebook_form.instance.set_next_iter() @@ -558,20 +557,24 @@ def googlebooks(request, googlebooks_id): return HttpResponseNotFound("failed looking up googlebooks id %s" % googlebooks_id) try: edition = bookloader.add_by_googlebooks_id(googlebooks_id) - if edition.new: + if edition and edition.new: # add related editions asynchronously tasks.populate_edition.delay(edition.isbn_13) - if request.user.is_authenticated(): + if request.user.is_authenticated: request.user.profile.works.add(edition.work) except bookloader.LookupFailure: logger.warning("failed to load googlebooks_id %s" % googlebooks_id) return HttpResponseNotFound("failed looking up googlebooks id %s" % googlebooks_id) + except IntegrityError: + logger.warning("duplicate (maybe) googlebooks_id %s" % googlebooks_id) + return HttpResponseNotFound("failed adding googlebooks id %s" % googlebooks_id) + if not edition: return HttpResponseNotFound("invalid googlebooks id") work_url = reverse('work', kwargs={'work_id': edition.work_id}) # process waiting add request - if not request.user.is_anonymous() and request.session.has_key("add_wishlist"): + if not request.user.is_anonymous and "add_wishlist" in request.session: add_url = request.session["add_wishlist"] if add_url == request.path: request.user.wishlist.add_work(edition.work, "login", notify=True) @@ -616,7 +619,7 @@ def form_valid(self, form): context = self.get_context_data() context['subject'] = form.cleaned_data['subject'] context['onto_subject'] = form.cleaned_data['onto_subject'] - if self.request.POST.has_key('confirm_map_subject'): + if 'confirm_map_subject' in 
self.request.POST: initial_count = context['onto_subject'].works.all().count() initial_free_count = context['onto_subject'].works.filter(is_free=True).count() context['onto_subject'].works.add(*list(context['subject'].works.all())) @@ -631,7 +634,7 @@ def form_valid(self, form): class FilterableListView(ListView): send_marc = False def get_queryset(self): - if self.request.GET.has_key('pub_lang'): + if 'pub_lang' in self.request.GET: if self.model is models.Campaign: return self.get_queryset_all().filter(work__language=self.request.GET['pub_lang']) else: @@ -641,7 +644,7 @@ def get_queryset(self): def get_context_data(self, **kwargs): context = super(FilterableListView, self).get_context_data(**kwargs) - if self.request.GET.has_key('pub_lang'): + if 'pub_lang' in self.request.GET: context['pub_lang'] = self.request.GET['pub_lang'] else: context['pub_lang'] = '' @@ -660,42 +663,23 @@ def render_to_response(self, context, **response_kwargs): class WorkListView(FilterableListView): template_name = "work_list.html" context_object_name = "work_list" - max_works = 100000 def get_queryset_all(self): facet = self.kwargs.get('facet', None) - if (facet == 'popular'): - return models.Work.objects.exclude(num_wishes=0).order_by('-num_wishes', 'id') - elif (facet == 'recommended'): + if facet == 'popular': + return models.Work.objects.exclude(num_wishes=0).order_by('-num_wishes') + elif facet == 'recommended': self.template_name = "recommended.html" return models.Work.objects.filter(wishlists__user=recommended_user).order_by('-num_wishes') - elif (facet == 'new'): - return models.Work.objects.exclude(num_wishes=0).order_by('-created', '-num_wishes' ,'id') else: - return models.Work.objects.all().order_by('-created', 'id') + return models.Work.objects.all().order_by('-created') def get_context_data(self, **kwargs): context = super(WorkListView, self).get_context_data(**kwargs) qs = self.get_queryset() - context['facet'] = self.kwargs.get('facet','') - works_unglued = 
qs.filter(is_free = True).distinct() | qs.filter(campaigns__status='SUCCESSFUL').distinct() - context['works_unglued'] = works_unglued[:self.max_works] - context['works_active'] = qs.filter(campaigns__status='ACTIVE').distinct()[:self.max_works] - context['works_wished'] = qs.filter(is_free=False).exclude(campaigns__status='ACTIVE').exclude(campaigns__status='SUCCESSFUL').distinct()[:self.max_works] - - counts = {} - counts['unglued'] = context['works_unglued'].count() - counts['unglueing'] = context['works_active'].count() - counts['wished'] = context['works_wished'].count() - context['counts'] = counts - - if counts['unglueing']: - context['activetab'] = "#2" - elif counts['unglued']: - context['activetab'] = "#1" - else: - context['activetab'] = "#3" - + context['facet'] = self.kwargs.get('facet','all') + context['works_unglued'] = qs.filter(is_free=True).distinct() + context['url_name'] = 'work_list_nopub' return context class FacetedView(FilterableListView): @@ -706,6 +690,10 @@ def get_queryset_all(self): self.vertex = get_facet_object(facet_path) order_by = self.request.GET.get('order_by', 'newest') + + # robots occasionally mangle order_by + order_by = order_by if order_by in ORDER_BY_KEYS else 'newest' + #special cases if order_by == 'subjects': return self.vertex.get_query_set().annotate(kws=Count('subjects')).order_by('kws') @@ -715,7 +703,7 @@ def get_context_data(self, **kwargs): context = super(FacetedView, self).get_context_data(**kwargs) facet = self.kwargs.get('facet','all') qs = self.get_queryset() - if self.request.GET.has_key('setkw') and self.request.user.is_staff: + if 'setkw' in self.request.GET and self.request.user.is_staff: setkw = self.request.GET['setkw'] try: context['setkw'] = models.Subject.objects.get(name=setkw) @@ -725,7 +713,11 @@ def get_context_data(self, **kwargs): context['tab_override'] = 'tabs-1' context['path'] = self.vertex.get_facet_path().replace('//','/').strip('/') context['vertex'] = self.vertex - context['order_by'] 
= self.request.GET.get('order_by', 'newest') + + order_by = self.request.GET.get('order_by', 'newest') + # robots occasionally mangle order_by + context['order_by'] = order_by if order_by in ORDER_BY_KEYS else 'newest' + context['view_as'] = self.request.GET.get('view_as', None) return context @@ -733,7 +725,6 @@ def get_context_data(self, **kwargs): class ByPubView(WorkListView): template_name = "bypub_list.html" context_object_name = "work_list" - max_works = 100000 publisher_name = None publisher = None @@ -745,8 +736,8 @@ def get_publisher_name(self): self.set_publisher() def set_publisher(self): - if self.publisher_name.key_publisher.count(): - self.publisher = self.publisher_name.key_publisher.all()[0] + if self.publisher_name.key_publisher.exists(): + self.publisher = self.publisher_name.key_publisher.first() elif self.publisher_name.publisher: self.publisher = self.publisher_name.publisher self.publisher_name = self.publisher.name @@ -755,11 +746,11 @@ def get_queryset_all(self): facet = self.kwargs.get('facet','') self.get_publisher_name() objects = models.Work.objects.filter(editions__publisher_name__id=self.publisher_name.id).distinct() - if (facet == 'popular'): + if facet == 'popular': return objects.order_by('-num_wishes', 'id') - elif (facet == 'pubdate'): + elif facet == 'pubdate': return objects.order_by('-editions__publication_date') # turns out this messes up distinct, and MySQL doesn't support DISTINCT ON - elif (facet == 'new'): + elif facet == 'new': return objects.filter(num_wishes__gt=0).order_by('-created', '-num_wishes' ,'id') else: return objects.order_by('title', 'id') @@ -769,6 +760,7 @@ def get_context_data(self, **kwargs): context['pubname'] = self.publisher_name context['publisher'] = self.publisher context['facet'] = self.kwargs.get('facet','all') + context['url_name'] = 'bypub_list' return context @@ -873,7 +865,7 @@ def get_context_data(self, **kwargs): return context def get_form_class(self): - if self.request.method == 'POST' 
and self.request.POST.has_key('confirm_merge_works'): + if self.request.method == 'POST' and 'confirm_merge_works' in self.request.POST: return WorkForm else: return OtherWorkForm @@ -888,7 +880,7 @@ def get_form_kwargs(self): def form_valid(self, form): other_work = form.cleaned_data['other_work'] context = self.get_context_data() - if self.request.POST.has_key('confirm_merge_works'): + if 'confirm_merge_works' in self.request.POST: context['old_work_id'] = other_work.id self.work = merge_works(self.work, other_work, self.request.user) context['merge_complete'] = True @@ -957,7 +949,7 @@ def get_preapproval_amount(self): def get_form_kwargs(self): - assert self.request.user.is_authenticated() + assert self.request.user.is_authenticated self.work = safe_get_work(self.kwargs["work_id"]) # if there is no campaign or if campaign is not active, we should raise an error @@ -967,7 +959,7 @@ def get_form_kwargs(self): self.premiums = self.campaign.custom_premiums() | models.Premium.objects.filter(id=150) # Campaign must be ACTIVE assert self.campaign.status == 'ACTIVE' - except Exception, e: + except Exception as e: # this used to raise an exception, but that seemed pointless. # This now has the effect of preventing any pledges. 
return {} @@ -978,7 +970,7 @@ def get_form_kwargs(self): type=PAYMENT_TYPE_AUTHORIZATION ) premium_id = self.request.GET.get('premium_id', self.request.POST.get('premium_id', 150)) - if transactions.count() == 0: + if not transactions.exists(): ack_name = self.request.user.profile.ack_name ack_dedication = '' anonymous = self.request.user.profile.anon_pref @@ -998,11 +990,11 @@ def get_form_kwargs(self): 'ack_name':ack_name, 'ack_dedication':ack_dedication, 'anonymous':anonymous} if self.request.method == 'POST': self.data.update(self.request.POST.dict()) - if not self.request.POST.has_key('anonymous'): + if not 'anonymous' in self.request.POST: del self.data['anonymous'] - if not self.request.POST.has_key('ack_name'): + if not 'ack_name' in self.request.POST: del self.data['ack_name'] - if not self.request.POST.has_key('ack_dedication'): + if not 'ack_dedication' in self.request.POST: del self.data['ack_dedication'] return {'data':self.data} else: @@ -1100,7 +1092,7 @@ def get_context_data(self, **kwargs): return context def get_form_kwargs(self): - assert self.request.user.is_authenticated() + assert self.request.user.is_authenticated self.work = safe_get_work(self.kwargs["work_id"]) # if there is no campaign or if campaign is not active, we should raise an error @@ -1108,7 +1100,7 @@ def get_form_kwargs(self): self.campaign = self.work.last_campaign() # Campaign must be ACTIVE assert self.campaign.status == 'ACTIVE' - except Exception, e: + except Exception as e: # this used to raise an exception, but that seemed pointless. This now has the effect of preventing any pledges. 
return {} self.data = { @@ -1121,7 +1113,7 @@ def get_form_kwargs(self): data.update(self.data) self.data = data self.data['give'] = self.give - if not self.request.POST.has_key('anonymous'): + if not 'anonymous' in self.request.POST: del self.data['anonymous'] return {'data':self.data} else: @@ -1163,14 +1155,16 @@ class NewDonationView(FormView): def form_valid(self, form): p = PaymentManager() t, url = p.process_transaction('USD', form.cleaned_data["amount"], - user = self.request.user, - paymentReason="Donation to {}".format(COMPANY_TITLE), - ) + user=self.request.user, + paymentReason=form.cleaned_data.get("reason", ""), + ) if url: return HttpResponseRedirect(url) else: - logger.error("Attempt to produce transaction id {0} failed".format(t.id)) - return HttpResponse("Our attempt to set up your donation failed. We have logged this problem.") + logger.error("Attempt to produce transaction id %s failed", t.id) + return HttpResponse( + "Our attempt to set up your donation failed. We have logged this problem." 
+ ) class FundView(FormView): @@ -1179,7 +1173,7 @@ class FundView(FormView): action = None def get_form_class(self): - if self.request.user.is_anonymous(): + if self.request.user.is_anonymous: return AnonCCForm elif self.request.user.profile.account: return AccountCCForm @@ -1188,7 +1182,7 @@ def get_form_class(self): def get_form_kwargs(self): kwargs = super(FundView, self).get_form_kwargs() - if kwargs.has_key('data'): + if 'data' in kwargs: data = kwargs['data'].copy() kwargs['data'] = data else: @@ -1215,7 +1209,7 @@ def get_form_kwargs(self): data.update( {'preapproval_amount':self.transaction.needed_amount, - 'username':self.request.user.username if self.request.user.is_authenticated() else None, + 'username':self.request.user.username if self.request.user.is_authenticated else None, 'work_id':self.transaction.campaign.work_id if self.transaction.campaign else None, 'title':self.transaction.campaign.work.title if self.transaction.campaign else COMPANY_TITLE} ) @@ -1238,7 +1232,7 @@ def form_valid(self, form): return_url = "%s?tid=%s" % (reverse('pledge_complete'), self.transaction.id) if not self.transaction.campaign: - if self.request.user.is_authenticated(): + if self.request.user.is_authenticated: self.transaction.user = self.request.user # if there's an email address, put it in the receipt column, so far unused. self.transaction.receipt = form.cleaned_data.get("email", None) @@ -1246,12 +1240,12 @@ def form_valid(self, form): elif self.transaction.campaign.type == THANKS and self.transaction.user == None: #anonymous user, just charge the card! - if self.request.user.is_authenticated(): + if self.request.user.is_authenticated: self.transaction.user = self.request.user # if there's an email address, put it in the receipt column, so far unused. 
self.transaction.receipt = form.cleaned_data.get("email", None) t, url = p.charge(self.transaction, return_url = return_url, token=stripe_token) - elif self.request.user.is_anonymous(): + elif self.request.user.is_anonymous: #somehow the user lost their login return HttpResponseRedirect(reverse('superlogin')) elif self.transaction.user.id != self.request.user.id: @@ -1317,12 +1311,12 @@ def get_context_data(self, **kwargs): return context except CreditLog.DoesNotExist: #not used yet! - amount = envelope['amount']+envelope['cents']/D(100) + amount = envelope['amount'] + envelope['cents'] // D(100) CreditLog.objects.create(user=user, amount=amount, action='deposit', sent=envelope['sent']) ts = Transaction.objects.filter(user=user, campaign=campaign, status=TRANSACTION_STATUS_NONE).order_by('-pk') - if ts.count()==0: + if not ts.exists(): ts = Transaction.objects.filter(user=user, campaign=campaign, status=TRANSACTION_STATUS_MODIFIED).order_by('-pk') - if ts.count()>0: + if ts.exists(): t = ts[0] credit_transaction(t, user, amount) for t in ts[1:]: @@ -1346,7 +1340,7 @@ def get_context_data(self, **kwargs): context = super(PledgeRechargeView, self).get_context_data(**kwargs) # the following should be true since PledgeView.as_view is wrapped in login_required - assert self.request.user.is_authenticated() + assert self.request.user.is_authenticated user = self.request.user work = safe_get_work(self.kwargs["work_id"]) @@ -1405,7 +1399,7 @@ def get(self, request, *args, **kwargs): return DownloadView.as_view()(request, work=self.transaction.campaign.work) else: - if request.user.is_authenticated(): + if request.user.is_authenticated: if self.user_is_ok(): return self.render_to_response(context) else: @@ -1448,7 +1442,7 @@ def get_context_data(self): try: campaign = self.transaction.campaign work = campaign.work - except Exception, e: + except Exception as e: campaign = None work = None @@ -1457,7 +1451,7 @@ def get_context_data(self): if not self.user_is_ok(): return 
context - gift = self.transaction.extra.has_key('give_to') + gift = 'give_to' in self.transaction.extra if not gift: # add the work corresponding to the Transaction on the user's wishlist if it's not already on the wishlist if self.transaction.user is not None and (campaign is not None) and (work is not None): @@ -1495,7 +1489,7 @@ def get_context_data(self, **kwargs): # the following should be true since PledgeCancelView.as_view is wrapped in login_required - if self.request.user.is_authenticated(): + if self.request.user.is_authenticated: user = self.request.user else: context["error"] = "You are not logged in." @@ -1513,7 +1507,7 @@ def get_context_data(self, **kwargs): work = campaign.work transactions = campaign.transactions().filter(user=user, status=TRANSACTION_STATUS_ACTIVE) - if transactions.count() < 1: + if not transactions.exists(): context["error"] = "You don't have an active transaction for this campaign." return context elif transactions.count() > 1: @@ -1544,7 +1538,7 @@ def form_valid(self, form): campaign_id = self.request.POST.get('campaign_id', self.request.GET.get('campaign_id')) # this following logic should be extraneous. - if self.request.user.is_authenticated(): + if self.request.user.is_authenticated: user = self.request.user else: return HttpResponse("You need to be logged in.") @@ -1580,7 +1574,7 @@ def form_valid(self, form): else: logger.error("Attempt to cancel transaction id {0} failed".format(transaction.id)) return HttpResponse("Our attempt to cancel your transaction failed. We have logged this error.") - except Exception, e: + except Exception as e: logger.error("Exception from attempt to cancel pledge for campaign id {0} for username {1}: {2}".format(campaign_id, user.username, e)) return HttpResponse("Sorry, something went wrong in canceling your campaign pledge. 
We have logged this error.") @@ -1589,98 +1583,9 @@ def works_user_can_admin(user): Q(claim__user = user) | Q(claim__rights_holder__owner = user) ) -def works_user_can_admin_filter(request, work_id): - def work_survey_filter(answers): - works = works_user_can_admin(request.user) - if work_id == '0' and request.user.is_staff: - return answers - elif work_id: - work = safe_get_work(work_id) - if user_can_edit_work(request.user, work): - return answers.filter(run__run_info_histories__landing__works=work) - else: - return answers.none() - else: - return answers.filter(run__run_info_histories__landing__works__in=works) - return work_survey_filter - -def export_surveys(request, qid, work_id): - def extra_entries(subject, run): - landing = completed = None - try: - landing = run.run_info_histories.all()[0].landing - completed = run.run_info_histories.all()[0].completed - except IndexError: - try: - landing = run.run_infos.all()[0].landing - completed = run.run_infos.all()[0].created - except IndexError: - label = wid = "error" - if landing: - label = landing.label - wid = landing.object_id - return [wid, subject.ip_address, run.id, completed, label] - if not request.user.is_authenticated() : - return HttpResponseRedirect(reverse('surveys')) - extra_headings = [u'work id', u'subject ip address', u'run id', u'date completed', u'landing label'] - return export_answers(request, qid, - answer_filter=works_user_can_admin_filter(request, work_id), - extra_entries=extra_entries, - extra_headings=extra_headings, - filecode=work_id) - -def surveys_summary(request, qid, work_id): - if not request.user.is_authenticated() : - return HttpResponseRedirect(reverse('surveys')) - return answer_summary( - request, - qid, - answer_filter=works_user_can_admin_filter(request, work_id), - ) - - -def new_survey(request, work_id): - if not request.user.is_authenticated() : - return HttpResponseRedirect(reverse('surveys')) - my_works = works_user_can_admin( request.user) - if work_id: - work = 
safe_get_work(work_id) - for my_work in my_works: - if my_work == work: - form = SurveyForm() - break - else: - return HttpResponseRedirect(reverse('surveys')) - else: - work = None - form = SurveyForm() - if request.method == 'POST': - form = SurveyForm(data=request.POST) - if form.is_valid(): - if not work and form.work: - for my_work in my_works: - print '{} {}'.format(my_work.id, form.work_id) - if my_work == form.work: - work = form.work - break - else: - print 'not mine' - return HttpResponseRedirect(reverse('surveys')) - print "create landing" - landing = Landing.objects.create(label=form.cleaned_data['label'], questionnaire=form.cleaned_data['survey'], content_object=work) - return HttpResponseRedirect(reverse('surveys')) - return render(request, "manage_survey.html", {"work":work, "form":form}) - -def surveys(request): - if not request.user.is_authenticated() : - return render(request, "surveys.html") - works = works_user_can_admin(request.user) - work_ids = [work.id for work in works] - surveys = Questionnaire.objects.filter(landings__object_id__in=work_ids).distinct() - return render(request, "surveys.html", {"works":works, "surveys":surveys}) def campaign_admin(request): - if not request.user.is_authenticated() : + if not request.user.is_authenticated: return render(request, "admins_only.html") if not request.user.is_staff : return render(request, "admins_only.html") @@ -1734,7 +1639,7 @@ def campaigns_types(): else: check_status_results += "

    No payments needed updating

    " command_status = _("Transactions updated based on PaymentDetails and PreapprovalDetails") - except Exception, e: + except Exception as e: check_status_results = e elif 'execute_campaigns' in request.POST.keys(): c_id = request.POST.get('active_campaign', None) @@ -1743,7 +1648,7 @@ def campaigns_types(): campaign = models.Campaign.objects.get(id=c_id) results = pm.execute_campaign(campaign) command_status = str(results) - except Exception, e: + except Exception as e: command_status = "Error in executing transactions for campaign %s " % (str(e)) elif 'finish_campaigns' in request.POST.keys(): c_id = request.POST.get('incomplete_campaign', None) @@ -1752,7 +1657,7 @@ def campaigns_types(): campaign = models.Campaign.objects.get(id=c_id) results = pm.finish_campaign(campaign) command_status = str(results) - except Exception, e: + except Exception as e: command_status = "Error in finishing transactions for campaign %s " % (str(e)) elif 'cancel_campaigns' in request.POST.keys(): @@ -1762,7 +1667,7 @@ def campaigns_types(): campaign = models.Campaign.objects.get(id=c_id) results = pm.cancel_campaign(campaign) command_status = str(results) - except Exception, e: + except Exception as e: command_status = "Error in canceling transactions for campaign %s " % (str(e)) (campaigns_with_active_transactions, campaigns_with_incomplete_transactions, campaigns_with_completed_transactions, @@ -1822,30 +1727,13 @@ def supporter(request, supporter_username, template_name, extra_context={}): activetab = "#3" # following block to support profile admin form in supporter page - if request.user.is_authenticated() and request.user.username == supporter_username: + if request.user.is_authenticated and request.user.username == supporter_username: profile_obj = request.user.profile if request.method == 'POST': profile_form = ProfileForm(data=request.POST, instance=profile_obj) if profile_form.is_valid(): - if profile_form.cleaned_data['clear_facebook'] or 
profile_form.cleaned_data['clear_twitter'] or profile_form.cleaned_data['clear_goodreads'] : - if profile_form.cleaned_data['clear_facebook']: - profile_obj.facebook_id = 0 - if profile_obj.avatar_source == models.FACEBOOK: - profile_obj.avatar_source = models.UNGLUEITAR - if profile_form.cleaned_data['clear_twitter']: - profile_obj.twitter_id = "" - if profile_obj.avatar_source == models.TWITTER: - profile_obj.avatar_source = models.UNGLUEITAR - if profile_form.cleaned_data['clear_goodreads']: - profile_obj.goodreads_user_id = None - profile_obj.goodreads_user_name = None - profile_obj.goodreads_user_link = None - profile_obj.goodreads_auth_token = None - profile_obj.goodreads_auth_secret = None - - profile_obj.save() profile_form.save() else: @@ -1881,7 +1769,7 @@ def library(request, library_name): except Library.DoesNotExist: raise Http404 works_active = models.Work.objects.filter(acqs__user=library.user, acqs__license=LIBRARY).distinct() - if works_active.count() > 0: + if works_active.exists(): context['works_active'] = works_active context['activetab'] = "#2" context['ungluers'] = userlists.library_users(library, 5) @@ -1916,42 +1804,70 @@ def form_valid(self, form): return render(self.request, self.template_name, self.get_context_data()) def search(request): + """ + request params + q the query + ty type - au is author, g or anything else is general + gbo >0 = start in google books 0 = start in unglue.it + page = page number for unglue.it results + gbpage = page number for google results + + max 10 items per page from either unglue.it or gb + possible results - 1 page of unglue.it results + - an additional page of unglue.it + - the last page of ungluit + - beginning of gb, + - more from gb. 
+ + + """ q = request.GET.get('q', '').strip() ty = request.GET.get('ty', 'g') # ge= 'general, au= 'author' request.session['q'] = q + gbo = request.GET.get('gbo', '0') # gbo says where to start try: page = int(request.GET.get('page', 1)) except ValueError: # garbage in page page = 1 - gbo = request.GET.get('gbo', 'n') # gbo is flag for google books only - our_stuff = Q(is_free=True) | Q(campaigns__isnull=False) - if len(q) > 1 and page == 1 and not gbo == 'y': + try: + gbpage = int(request.GET.get('gbpage', 1)) + except ValueError: + # garbage in page + gbpage = 1 + + start = (page - 1) * 10 + end = page * 10 + + our_stuff = Q(is_free=True) + out = [] + if len(q) > 1 and gbo == '0': isbnq = ISBN(q) if isbnq.valid: work_query = Q(identifiers__value=str(isbnq), identifiers__type="isbn") elif ty == 'au': work_query = Q(editions__authors__name=q) else: - work_query = Q(title__icontains=q) | Q(editions__authors__name__icontains=q) | Q(subjects__name__iexact=q) - campaign_works = models.Work.objects.filter(our_stuff).filter(work_query).distinct() - for work in campaign_works: - results = models.Work.objects.none() - break - else: - if is_bad_robot(request): - results = models.Work.objects.none() - else: - results = gluejar_search(q, user_ip=request.META['REMOTE_ADDR'], page=1) - gbo = 'y' - else: - if gbo == 'n': - page = page-1 # because page=1 is the unglue.it results + work_query = Q(title__istartswith=q) + ug_works = models.Work.objects.filter(our_stuff).filter(work_query).distinct() + out = ug_works[start:end] + + if len(out) < 10: + ug_more = 'no' + page = 1 + if is_bad_robot(request): results = models.Work.objects.none() else: - results = gluejar_search(q, user_ip=request.META['REMOTE_ADDR'], page=page) - campaign_works = None + results = gluejar_search(q, user_ip=request.META['REMOTE_ADDR'], page=gbpage) + + elif not ug_works[10:11]: + ug_more = 'no' + results = gluejar_search(q, user_ip=request.META['REMOTE_ADDR'], page=1) + + else: + ug_more = 'yes' + results 
= models.Work.objects.none() # flag search result as on wishlist as appropriate works = [] @@ -1963,10 +1879,10 @@ def search(request): works.append(result) context = { "q": q, - "gbo": gbo, "ty": ty, "results": works, - "campaign_works": campaign_works + "ug_works": out, + "ug_more": ug_more } return render(request, 'search.html', context) @@ -2004,7 +1920,7 @@ def wishlist(request): except bookloader.LookupFailure: logger.warning("failed to load googlebooks_id %s" % googlebooks_id) return HttpResponse('error adding googlebooks id') - except Exception, e: + except Exception as e: logger.warning("Error in wishlist adding %s" % (e)) return HttpResponse('error adding googlebooks id') # TODO: redirect to work page, when it exists @@ -2066,10 +1982,7 @@ def get_context_data(self, **kwargs): users.year = users.filter(date_joined__year = date_today().year) users.month = users.year.filter(date_joined__month = date_today().month) users.yesterday = users.filter(date_joined__range = (date_today()-timedelta(days=1), date_today())) - users.gr = users.filter(profile__goodreads_user_id__isnull = False) users.lt = users.exclude(profile__librarything_id = '') - users.fb = users.filter(profile__facebook_id__isnull = False) - users.tw = users.exclude(profile__twitter_id = '') users.libtools = users.filter(libpref__isnull = False) works = models.Work.objects works.today = works.filter(created__range = (date_today(), now())) @@ -2171,153 +2084,9 @@ def get_context_data(self, **kwargs): }) return cd -class GoodreadsDisplayView(TemplateView): - template_name = "goodreads_display.html" - def get_context_data(self, **kwargs): - context = super(GoodreadsDisplayView, self).get_context_data(**kwargs) - session = self.request.session - gr_client = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET) - - user = self.request.user - if user.is_authenticated(): - api_key = ApiKey.objects.filter(user=user)[0].key - context['api_key'] = api_key - - if 
user.profile.goodreads_user_id is None: - # calculate the Goodreads authorization URL - (context["goodreads_auth_url"], request_token) = gr_client.begin_authorization(self.request.build_absolute_uri(reverse('goodreads_cb'))) - logger.info("goodreads_auth_url: %s" %(context["goodreads_auth_url"])) - # store request token in session so that we can redeem it for auth_token if authorization works - session['goodreads_request_token'] = request_token['oauth_token'] - session['goodreads_request_secret'] = request_token['oauth_token_secret'] - else: - gr_shelves = gr_client.shelves_list(user_id=user.profile.goodreads_user_id) - context["shelves_info"] = gr_shelves - gr_shelf_load_form = GoodreadsShelfLoadingForm() - # load the shelves into the form - choices = [('all:%d' % (gr_shelves["total_book_count"]),'all (%d)' % (gr_shelves["total_book_count"]))] + \ - [("%s:%d" % (s["name"], s["book_count"]) ,"%s (%d)" % (s["name"], s["book_count"])) for s in gr_shelves["user_shelves"]] - gr_shelf_load_form.fields['goodreads_shelf_name_number'].widget = Select(choices=tuple(choices)) - - context["gr_shelf_load_form"] = gr_shelf_load_form - -# also load any CeleryTasks associated with the user - context["celerytasks"] = models.CeleryTask.objects.filter(user=user) - - return context - -@login_required -def goodreads_auth(request): - - # calculate the Goodreads authorization URL - gr_client = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET) - (goodreads_auth_url, request_token) = gr_client.begin_authorization(request.build_absolute_uri(reverse('goodreads_cb'))) - logger.info("goodreads_auth_url: %s" %(goodreads_auth_url)) - # store request token in session so that we can redeem it for auth_token if authorization works - request.session['goodreads_request_token'] = request_token['oauth_token'] - request.session['goodreads_request_secret'] = request_token['oauth_token_secret'] - - return HttpResponseRedirect(goodreads_auth_url) - -@login_required -def 
goodreads_cb(request): - """handle callback from Goodreads""" - - session = request.session - authorized_flag = request.GET['authorize'] # is it '1'? - request_oauth_token = request.GET['oauth_token'] - - if authorized_flag == '1': - request_token = {'oauth_token': session.get('goodreads_request_token'), - 'oauth_token_secret': session.get('goodreads_request_secret')} - gr_client = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET) - - access_token = gr_client.complete_authorization(request_token) - - # store the access token in the user profile - profile = request.user.profile - profile.goodreads_auth_token = access_token["oauth_token"] - profile.goodreads_auth_secret = access_token["oauth_token_secret"] - - # let's get the userid, username - user = gr_client.auth_user() - - profile.goodreads_user_id = user["userid"] - profile.goodreads_user_name = user["name"] - profile.goodreads_user_link = user["link"] - - profile.save() # is this needed? - - # redirect to the Goodreads display page -- should observe some next later - return HttpResponseRedirect(reverse('home')) - -@require_POST -@login_required -@csrf_exempt -def goodreads_flush_assoc(request): - user = request.user - if user.is_authenticated(): - profile = user.profile - profile.goodreads_user_id = None - profile.goodreads_user_name = None - profile.goodreads_user_link = None - profile.goodreads_auth_token = None - profile.goodreads_auth_secret = None - profile.save() - logger.info('Goodreads association flushed for user %s', user) - return HttpResponseRedirect(reverse('goodreads_display')) - -@require_POST -@login_required -@csrf_exempt -def goodreads_load_shelf(request): - """ - a view to allow user load goodreads shelf into her wishlist - """ - # Should be moved to the API - goodreads_shelf_name_number = request.POST.get('goodreads_shelf_name_number', 'all:0') - user = request.user - try: - # parse out shelf name and expected number of books - (shelf_name, 
expected_number_of_books) = re.match(r'^(.*):(\d+)$', goodreads_shelf_name_number).groups() - expected_number_of_books = int(expected_number_of_books) - logger.info('Adding task to load shelf %s to user %s with %d books', shelf_name, user, expected_number_of_books) - load_task_name = "load_goodreads_shelf_into_wishlist" - load_task = getattr(tasks, load_task_name) - task_id = load_task.delay(user.id, shelf_name, expected_number_of_books=expected_number_of_books) - ct = models.CeleryTask() - ct.task_id = task_id - ct.function_name = load_task_name - ct.user = user - ct.description = "Loading Goodread shelf %s to user %s with %s books" % (shelf_name, user, expected_number_of_books) - ct.save() - - return HttpResponse("We're on it! Reload the page to see the books we've snagged so far.") - except Exception, e: - return HttpResponse("Error in loading shelf: %s " % (e)) - logger.info("Error in loading shelf for user %s: %s ", user, e) -@login_required -def goodreads_calc_shelves(request): - - # we should move towards calculating this only if needed (perhaps with Ajax), caching previous results, etc to speed up - # performance - - if request.user.profile.goodreads_user_id is not None: - gr_client = GoodreadsClient(key=settings.GOODREADS_API_KEY, secret=settings.GOODREADS_API_SECRET) - goodreads_shelves = gr_client.shelves_list(user_id=request.user.profile.goodreads_user_id) - #goodreads_shelf_load_form = GoodreadsShelfLoadingForm() - ## load the shelves into the form - #choices = [('all:%d' % (goodreads_shelves["total_book_count"]),'all (%d)' % (goodreads_shelves["total_book_count"]))] + \ - # [("%s:%d" % (s["name"], s["book_count"]) ,"%s (%d)" % (s["name"], s["book_count"])) for s in goodreads_shelves["user_shelves"]] - #goodreads_shelf_load_form.fields['goodreads_shelf_name_number'].widget = Select(choices=tuple(choices)) - else: - goodreads_shelf_load_form = None - - return HttpResponse(json.dumps(goodreads_shelves), content_type="application/json") - @require_POST 
@login_required @@ -2347,7 +2116,7 @@ def librarything_load(request): ct.save() return HttpResponse("We're on it! Reload the page to see the books we've snagged so far.") - except Exception, e: + except Exception as e: return HttpResponse("Error in loading LibraryThing library: %s " % (e)) logger.info("Error in loading LibraryThing for user %s: %s ", user, e) @@ -2359,7 +2128,7 @@ def clear_wishlist(request): request.user.wishlist.works.clear() logger.info("Wishlist for user %s cleared", request.user) return HttpResponse('wishlist cleared') - except Exception, e: + except Exception as e: logger.info("Error in clearing wishlist for user %s: %s ", request.user, e) return HttpResponse("Error in clearing wishlist: %s " % (e)) @@ -2407,7 +2176,7 @@ def clear_celery_tasks(request): request.user.tasks.clear() logger.info("Celery tasks for user %s cleared", request.user) return HttpResponse('Celery Tasks List cleared') - except Exception, e: + except Exception as e: logger.info("Error in clearing Celery Tasks for user %s: %s ", request.user, e) return HttpResponse("Error in clearing Celery Tasks: %s " % (e)) @@ -2435,40 +2204,16 @@ def work_librarything(request, work_id): def work_openlibrary(request, work_id): work = safe_get_work(work_id) - isbns = ["ISBN:" + i.value for i in work.identifiers.filter(type='isbn')] url = None if work.openlibrary_id: url = work.openlibrary_url - elif len(isbns) > 0: - isbns = ",".join(isbns) - u = 'https://openlibrary.org/api/books?bibkeys=%s&jscmd=data&format=json' % isbns - try: - j = json.loads(requests.get(u).content) - # as long as there were some matches get the first one and route to it - if len(j.keys()) > 0: - first = j.keys()[0] - url = "https://openlibrary.org" + j[first]['key'] - except ValueError: - # fail at openlibrary - logger.warning("failed to get OpenLibrary json at %s" % u) # fall back to doing a search on openlibrary if not url: q = urlencode({'q': work.title + " " + work.author()}) url = 
"https://openlibrary.org/search?" + q return HttpResponseRedirect(url) -def work_goodreads(request, work_id): - work = safe_get_work(work_id) - isbn = work.first_isbn_13() - if work.goodreads_id: - url = work.goodreads_url - elif isbn: - url = "https://www.goodreads.com/book/isbn/%s" % isbn - else: - q = urlencode({'query': work.title + " " + work.author()}) - url = "https://www.goodreads.com/search?" + q - return HttpResponseRedirect(url) @login_required def emailshare(request, action): @@ -2557,13 +2302,13 @@ def feedback(request, recipient='unglueit@ebookfoundation.org', template='feedba context['num2'] = request.POST['num2'] else: - if request.user.is_authenticated(): + if request.user.is_authenticated: context['sender'] = request.user.email try: context['page'] = request.GET['page'] except: context['page'] = '/' - if not context.has_key('subject'): + if not 'subject' in context: context['subject'] = "Feedback on page "+context['page'] form = FeedbackForm(initial=context) context['form'] = form @@ -2618,11 +2363,11 @@ def show_beg(self): if not self.campaign or self.campaign.type != THANKS: return False elif self.user_license and self.user_license.thanked: - return self.request.GET.has_key('offer_id') or self.request.POST.has_key('offer_id') + return 'offer_id' in self.request.GET or 'offer_id' in self.request.POST elif self.lib_thanked: return False elif self.campaign.status != 'ACTIVE': - return self.request.GET.has_key('testmode') or self.request.POST.has_key('testmode') + return 'testmode' in self.request.GET or 'testmode' in self.request.POST else: return True @@ -2642,7 +2387,7 @@ def form_valid(self, form): return HttpResponse("Our attempt to set up your contribution failed. 
We have logged this problem.") def get_form_kwargs(self): - if self.kwargs.has_key('work'): + if 'work' in self.kwargs: self.work = self.kwargs["work"] self.show_beg = lambda: False else: @@ -2652,11 +2397,11 @@ def get_form_kwargs(self): self.lib_thanked = self.work.lib_thanked(self.request.user) self.data = { 'preapproval_amount':self.get_preapproval_amount(), - 'anonymous':True if self.request.user.is_anonymous() else self.request.user.profile.anon_pref, + 'anonymous':True if self.request.user.is_anonymous else self.request.user.profile.anon_pref, } if self.request.method == 'POST': self.data.update(self.request.POST.dict()) - if not self.request.POST.has_key('anonymous'): + if not 'anonymous' in self.request.POST: del self.data['anonymous'] return {'data':self.data} else: @@ -2671,13 +2416,13 @@ def get_context_data(self, **kwargs): unglued_ebooks = work.ebooks().filter(edition__unglued=True) other_ebooks = work.ebooks().filter(edition__unglued=False) - xfer_url = kindle_url = None + xfer_url = None acq = None formats = {} # a dict of format name and url for ebook in work.ebooks().all(): formats[ebook.format] = reverse('download_ebook', args=[ebook.id]) - if request.user.is_authenticated(): + if request.user.is_authenticated: #add a fave request.user.wishlist.add_work(work,'download') @@ -2691,12 +2436,10 @@ def get_context_data(self, **kwargs): # prepare this acq for download if not an_acq.watermarked or an_acq.watermarked.expired: if not an_acq.on_reserve: - watermark_acq.delay(an_acq) + watermark_acq.delay(an_acq.id) acq = an_acq formats['epub'] = reverse('download_acq', kwargs={'nonce':acq.nonce, 'format':'epub'}) - formats['mobi'] = reverse('download_acq', kwargs={'nonce':acq.nonce, 'format':'mobi'}) xfer_url = settings.BASE_URL_SECURE + formats['epub'] - kindle_url = settings.BASE_URL_SECURE + formats['mobi'] can_kindle = True break @@ -2708,7 +2451,7 @@ def get_context_data(self, **kwargs): #send to kindle try: - kindle_ebook = 
non_google_ebooks.filter(format='mobi')[0] + kindle_ebook = non_google_ebooks.filter(format='epub')[0] can_kindle = kindle_ebook.kindle_sendable() except IndexError: try: @@ -2732,7 +2475,6 @@ def get_context_data(self, **kwargs): 'other_ebooks': other_ebooks, 'formats': formats, 'xfer_url': xfer_url, - 'kindle_url': kindle_url, 'dropbox_key': settings.DROPBOX_KEY, 'can_kindle': can_kindle, 'base_url': settings.BASE_URL_SECURE, @@ -2750,8 +2492,8 @@ def get_context_data(self, **kwargs): 'action': "Contribution", 'user_license': self.user_license, 'lib_thanked': self.lib_thanked, - 'amount': D(self.request.session.pop('amount')/100) if self.request.session.has_key('amount') else None, - 'testmode': self.request.GET.has_key('testmode') or self.request.POST.has_key('testmode'), + 'amount': D(self.request.session.pop('amount') // 100) if 'amount' in self.request.session else None, + 'testmode': 'testmode' in self.request.GET or 'testmode' in self.request.POST, 'source': self.request.GET.get('source', self.request.POST.get('source', '')), }) @@ -2818,7 +2560,7 @@ def download_ebook(request, ebook_id): return HttpResponseRedirect(ebook.url) def download_purchased(request, work_id): - if request.user.is_anonymous(): + if request.user.is_anonymous: HttpResponseRedirect('/accounts/login/download/') return DownloadView.as_view()(request, work_id=work_id) @@ -2847,8 +2589,6 @@ def download_acq(request, nonce, format): acq.borrow() if format == 'epub': return HttpResponseRedirect(acq.get_epub_url()) - else: - return HttpResponseRedirect(acq.get_mobi_url()) def about(request, facet): template = "about_" + facet + ".html" @@ -2868,12 +2608,12 @@ def receive_gift(request, nonce): # put nonce in session so we know that a user has redeemed a Gift request.session['gift_nonce'] = nonce if gift.used: - if request.user.is_authenticated(): + if request.user.is_authenticated: #check that user hasn't redeemed the gift themselves if (gift.acq.user_id == request.user.id) and not 
gift.acq.expired: return HttpResponseRedirect(reverse('display_gift', args=[gift.id,'existing'])) return render(request, 'gift_error.html', context) - if request.user.is_authenticated(): + if request.user.is_authenticated: user_license = work.get_user_license(request.user) if user_license and user_license.purchased: # check if previously purchased- there would be two user licenses if so. @@ -3030,7 +2770,7 @@ def local_response(request, javascript, context, message): work = safe_get_work(work_id) context = {'work':work} acq = None - if request.user.is_authenticated(): + if request.user.is_authenticated: all_acqs = request.user.acqs.filter(work=work).order_by('-created') for an_acq in all_acqs: if not an_acq.expired: @@ -3041,7 +2781,7 @@ def local_response(request, javascript, context, message): # prepare this acq for download if not an_acq.watermarked or an_acq.watermarked.expired: if not an_acq.on_reserve: - watermark_acq.delay(an_acq) + watermark_acq.delay(an_acq.id) acq = an_acq break @@ -3051,7 +2791,7 @@ def local_response(request, javascript, context, message): else: non_google_ebooks = work.ebooks().exclude(provider='Google Books') try: - ebook = non_google_ebooks.filter(format='mobi')[0] + ebook = non_google_ebooks.filter(format='epub')[0] except IndexError: try: ebook = non_google_ebooks.filter(format='pdf')[0] @@ -3064,14 +2804,14 @@ def local_response(request, javascript, context, message): title = ebook.edition.work.kindle_safe_title() context['ebook'] = ebook - if request.POST.has_key('kindle_email'): + if 'kindle_email' in request.POST: kindle_email = request.POST['kindle_email'] try: validate_email(kindle_email) except ValidationError: return local_response(request, javascript, context, 3) request.session['kindle_email'] = kindle_email - elif request.user.is_authenticated(): + elif request.user.is_authenticated: kindle_email = request.user.profile.kindle_email context['kindle_email'] = kindle_email @@ -3109,7 +2849,7 @@ def local_response(request, 
javascript, context, message): logger.error('Unexpected error: %s', sys.exc_info()) return local_response(request, javascript, context, 1) - if request.POST.has_key('kindle_email') and not request.user.is_authenticated(): + if 'kindle_email' in request.POST and not request.user.is_authenticated: return HttpResponseRedirect(reverse('superlogin')) return local_response(request, javascript, context, 2) @@ -3134,7 +2874,7 @@ class LibModeView(FormView): success_url = reverse_lazy('marc') def form_valid(self, form): - enable = form.data.has_key('enable') + enable = 'enable' in form.data if enable: try: libpref = self.request.user.libpref diff --git a/frontend/views/bibedit.py b/frontend/views/bibedit.py index afc095258..58abff309 100644 --- a/frontend/views/bibedit.py +++ b/frontend/views/bibedit.py @@ -3,7 +3,7 @@ ''' from django.contrib.auth.decorators import login_required from django.core.files.storage import default_storage -from django.core.urlresolvers import reverse +from django.urls import reverse from django.db.models import Q from django.http import ( HttpResponseRedirect, @@ -23,6 +23,7 @@ from regluit.core.loaders import add_by_webpage from regluit.core.loaders.doab import add_by_doab from regluit.core.loaders.utils import ids_from_urls +from regluit.core.models import Subject from regluit.frontend.forms import EditionForm, IdentifierForm from .rh_views import user_is_rh @@ -32,7 +33,7 @@ def user_can_edit_work(user, work): ''' Check if a user is allowed to edit the work ''' - if user.is_anonymous(): + if user.is_anonymous: return False elif user.is_staff : return True @@ -56,16 +57,6 @@ def safe_get_work(work_id): raise Http404 return work -def add_subject(subject_name, work, authority=''): - ''' - add a subject to a work - ''' - try: - subject = models.Subject.objects.get(name=subject_name) - except models.Subject.DoesNotExist: - subject = models.Subject.objects.create(name=subject_name, authority=authority) - subject.works.add(work) - def 
get_edition(edition_id): ''' get edition and 404 if not found @@ -76,7 +67,7 @@ def get_edition(edition_id): raise Http404 (duplicate-code) def user_edition(edition, user): - if user and user.is_authenticated() and edition: + if user and user.is_authenticated and edition: user.profile.works.add(edition.work) return edition @@ -96,37 +87,38 @@ def get_edition_for_id(id_type, id_value, user=None): return ident.edition if ident.edition else ident.work.preferred_edition #need to make a new edition - if identifiers.has_key('goog'): + if 'goog' in identifiers: edition = add_by_googlebooks_id(identifiers['goog']) if edition: return user_edition(edition, user) - if identifiers.has_key('isbn'): + if 'isbn' in identifiers: edition = add_by_isbn(identifiers['isbn']) if edition: return user_edition(edition, user) - if identifiers.has_key('doab'): + if 'doab' in identifiers: edition = add_by_doab(identifiers['doab']) if edition: return user_edition(edition, user) - if identifiers.has_key('oclc'): + if 'oclc' in identifiers: edition = add_by_oclc(identifiers['oclc']) if edition: return user_edition(edition, user) - if identifiers.has_key('glue'): + if 'glue' in identifiers: try: work = models.safe_get_work(identifiers['glue']) return work.preferred_edition except: pass - if identifiers.has_key('http'): + if 'http' in identifiers: edition = add_by_webpage(identifiers['http'], user=user) - return user_edition(edition, user) + if edition: + return user_edition(edition, user) # return a dummy edition and identifier @@ -137,7 +129,7 @@ def get_edition_for_id(id_type, id_value, user=None): for key in identifiers.keys(): if key == 'glue': id_value = work.id - if key not in ('http', 'goog', 'oclc', 'isbn'): + if key not in ('goog', 'oclc', 'isbn'): if key in WORK_IDENTIFIERS: edid = str(edition.id) models.Identifier.objects.create(type='edid', value=edid, work=work, edition=edition) @@ -169,12 +161,13 @@ def new_edition(request, by=None): else: edition = get_edition_for_id(id_type, 
id_value, user=request.user) - return HttpResponseRedirect( - reverse('new_edition', kwargs={ - 'work_id': edition.work_id, - 'edition_id': edition.id - }) - ) + if edition: + return HttpResponseRedirect( + reverse('new_edition', kwargs={ + 'work_id': edition.work_id, + 'edition_id': edition.id + }) + ) else: form = IdentifierForm() return render(request, 'new_edition.html', {'form': form, 'alert':alert}) @@ -226,43 +219,60 @@ def edit_edition(request, work_id, edition_id, by=None): 'title': title, } if request.method == 'POST': - keep_editing = request.POST.has_key('add_author_submit') + keep_editing = 'add_author_submit' in request.POST form = None - edition.new_authors = zip( + edition.new_authors = list(zip( request.POST.getlist('new_author'), request.POST.getlist('new_author_relation') - ) + )) edition.new_subjects = request.POST.getlist('new_subject') if edition.id and admin: for author in edition.authors.all(): - if request.POST.has_key('delete_author_%s' % author.id): + if 'delete_author_%s' % author.id in request.POST: edition.remove_author(author) form = EditionForm(instance=edition, data=request.POST, files=request.FILES) keep_editing = True break work_rels = models.WorkRelation.objects.filter(Q(to_work=work) | Q(from_work=work)) for work_rel in work_rels: - if request.POST.has_key('delete_work_rel_%s' % work_rel.id): + if 'delete_work_rel_%s' % work_rel.id in request.POST: work_rel.delete() form = EditionForm(instance=edition, data=request.POST, files=request.FILES) keep_editing = True break - activate_all = request.POST.has_key('activate_all_ebooks') - deactivate_all = request.POST.has_key('deactivate_all_ebooks') + activate_all = 'activate_all_ebooks' in request.POST + deactivate_all = 'deactivate_all_ebooks' in request.POST ebookchange = False - if request.POST.has_key('set_ebook_rights') and request.POST.has_key('set_rights'): + if 'set_ebook_rights' in request.POST and 'set_rights' in request.POST: rights = request.POST['set_rights'] for ebook in 
work.ebooks_all(): ebook.rights = rights ebook.save() ebookchange = True - for ebook in work.ebooks_all(): - if request.POST.has_key('activate_ebook_%s' % ebook.id) or activate_all: - ebook.activate() - ebookchange = True - elif request.POST.has_key('deactivate_ebook_%s' % ebook.id) or deactivate_all: - ebook.deactivate() - ebookchange = True + if 'activate_recent_ebooks' in request.POST: + done_fmt = set() + for ebook in work.ebooks_all(): + for fmt in ['pdf', 'epub', 'mobi']: + if ebook.format == fmt: + if fmt not in done_fmt: + ebook.activate() + done_fmt.add(fmt) + else: + ebook.deactivate() + ebookchange = True + else: + for ebook in work.ebooks_all(): + ebook_key = 'activate_ebook_%s' % ebook.id + if ebook_key in request.POST and "activate_selected_ebooks" in request.POST: + ebook_action = request.POST[ebook_key] + if ebook.active : + if ebook_action == 'deactivate': + ebook.deactivate() + ebookchange = True + else: + if ebook_action == 'activate': + ebook.activate() + ebookchange = True if ebookchange: keep_editing = True form = EditionForm(instance=edition, data=request.POST, files=request.FILES) @@ -321,7 +331,7 @@ def edit_edition(request, work_id, edition_id, by=None): work=work ) for relator in edition.relators.all(): - if request.POST.has_key('change_relator_%s' % relator.id): + if 'change_relator_%s' % relator.id in request.POST: new_relation = request.POST['change_relator_%s' % relator.id] relator.set(new_relation) related_work = form.cleaned_data['add_related_work'] @@ -333,13 +343,13 @@ def edit_edition(request, work_id, edition_id, by=None): ) for (author_name, author_relation) in edition.new_authors: edition.add_author(author_name, author_relation) - if form.cleaned_data.has_key('bisac'): + if 'bisac' in form.cleaned_data: bisacsh = form.cleaned_data['bisac'] while bisacsh: - add_subject(bisacsh.full_label, work, authority="bisacsh") + Subject.set_by_name(bisacsh.full_label, work, authority="bisacsh") bisacsh = bisacsh.parent for subject_name in 
edition.new_subjects: - add_subject(subject_name, work) + Subject.set_by_name(subject_name, work) work_url = reverse('work', kwargs={'work_id': edition.work_id}) cover_file = form.cleaned_data.get("coverfile", None) if cover_file: @@ -349,7 +359,7 @@ def edit_edition(request, work_id, edition_id, by=None): edition.pk, cover_file.name ) - new_file = default_storage.open(cover_file_name, 'w') + new_file = default_storage.open(cover_file_name, 'wb') new_file.write(cover_file.read()) new_file.close() #and put its url into cover_image diff --git a/frontend/views/rh_views.py b/frontend/views/rh_views.py index 03410506b..fc0a83098 100644 --- a/frontend/views/rh_views.py +++ b/frontend/views/rh_views.py @@ -3,7 +3,7 @@ import logging from django.conf import settings -from django.core.urlresolvers import reverse, reverse_lazy +from django.urls import reverse, reverse_lazy from django.forms.models import modelformset_factory from django.http import HttpResponseRedirect, Http404 from django.shortcuts import render, get_object_or_404 @@ -39,7 +39,7 @@ def form_valid(self, form): return super(RHAgree, self).form_valid(form) def rh_admin(request, facet='top'): - if not request.user.is_authenticated() or not request.user.is_staff: + if not request.user.is_authenticated or not request.user.is_staff: return render(request, "admins_only.html") PendingFormSet = modelformset_factory(models.RightsHolder, fields=['approved'], extra=0) @@ -65,7 +65,7 @@ def rh_admin(request, facet='top'): return render(request, "rights_holders.html", context) def user_is_rh(user): - if user.is_anonymous(): + if user.is_anonymous: return False for rh in user.rights_holder.filter(approved=True): return True @@ -86,11 +86,11 @@ def form_valid(self, form): if not models.Claim.objects.filter( work=work, rights_holder=rights_holder, - ).exclude(status='release').count(): + ).exclude(status='release').exists(): form.save() return HttpResponseRedirect(reverse('rightsholders')) - def get_context_data(self, form): 
+ def get_context_data(self, form=None): try: work = form.cleaned_data['work'] except AttributeError: @@ -108,7 +108,7 @@ def claim(request): return ClaimView.as_view()(request) def rh_tools(request, template_name='rh_intro.html'): - if not request.user.is_authenticated() : + if not request.user.is_authenticated: return render(request, 'rh_intro.html') claims = request.user.claim.filter(user=request.user) campaign_form = "xxx" @@ -117,7 +117,7 @@ def rh_tools(request, template_name='rh_intro.html'): for claim in claims: if claim.can_open_new: if request.method == 'POST' and \ - request.POST.has_key('cl_%s-work' % claim.id) and \ + 'cl_%s-work' % claim.id in request.POST and \ int(request.POST['cl_%s-work' % claim.id]) == claim.work_id : claim.campaign_form = OpenCampaignForm( data = request.POST, @@ -153,7 +153,7 @@ def rh_tools(request, template_name='rh_intro.html'): if claim.campaign: if claim.campaign.status in ['ACTIVE','INITIALIZED']: e_m_key = 'edit_managers_%s' % claim.campaign.id - if request.method == 'POST' and request.POST.has_key(e_m_key): + if request.method == 'POST' and e_m_key in request.POST: claim.campaign.edit_managers_form = EditManagersForm( instance=claim.campaign, data=request.POST, @@ -174,7 +174,7 @@ def rh_tools(request, template_name='rh_intro.html'): new_campaign = None for campaign in campaigns: if campaign.clonable(): - if request.method == 'POST' and request.POST.has_key('c%s-campaign_id'% campaign.id): + if request.method == 'POST' and 'c%s-campaign_id'% campaign.id in request.POST: clone_form = CloneCampaignForm(data=request.POST, prefix = 'c%s' % campaign.id) if clone_form.is_valid(): campaign.clone() @@ -198,7 +198,7 @@ def manage_campaign(request, id, ebf=None, action='manage'): campaign.not_manager = False campaign.problems = [] - if (not request.user.is_authenticated()) or \ + if (not request.user.is_authenticated) or \ (not request.user in campaign.managers.all() and not request.user.is_staff): campaign.not_manager = True 
return render(request, 'manage_campaign.html', {'campaign': campaign}) @@ -211,7 +211,7 @@ def manage_campaign(request, id, ebf=None, action='manage'): offer.offer_form = OfferForm(instance=offer, prefix='offer_%d'%offer.id) if request.method == 'POST' : - if request.POST.has_key('add_premium') : + if 'add_premium' in request.POST : new_premium_form = CustomPremiumForm(data=request.POST) if new_premium_form.is_valid(): new_premium_form.save() @@ -221,7 +221,7 @@ def manage_campaign(request, id, ebf=None, action='manage'): alerts.append(_('New premium has not been added')) form = ManageCampaignForm(instance=campaign) activetab = '#2' - elif request.POST.has_key('save') or request.POST.has_key('launch') : + elif 'save' in request.POST or 'launch' in request.POST : form = ManageCampaignForm(instance=campaign, data=request.POST) if form.is_valid(): form.save() @@ -235,7 +235,7 @@ def manage_campaign(request, id, ebf=None, action='manage'): campaign.update_left() if campaign.type is THANKS : campaign.work.description = form.cleaned_data['work_description'] - tasks.process_ebfs.delay(campaign) + tasks.process_ebfs.delay(campaign.id) campaign.work.save() alerts.append(_('Campaign data has been saved')) activetab = '#2' @@ -250,9 +250,9 @@ def manage_campaign(request, id, ebf=None, action='manage'): else: alerts.append(_('Campaign has NOT been launched')) new_premium_form = CustomPremiumForm(initial={'campaign': campaign}) - elif request.POST.has_key('inactivate') : + elif 'inactivate' in request.POST : activetab = '#2' - if request.POST.has_key('premium_id'): + if 'premium_id' in request.POST: premiums_to_stop = request.POST.getlist('premium_id') for premium_to_stop in premiums_to_stop: selected_premium = models.Premium.objects.get(id=premium_to_stop) @@ -262,9 +262,9 @@ def manage_campaign(request, id, ebf=None, action='manage'): alerts.append(_('Premium %s has been inactivated'% premium_to_stop)) form = ManageCampaignForm(instance=campaign) new_premium_form = 
CustomPremiumForm(initial={'campaign': campaign}) - elif request.POST.has_key('change_offer'): + elif 'change_offer' in request.POST: for offer in offers : - if request.POST.has_key('offer_%d-work' % offer.id) : + if 'offer_%d-work' % offer.id in request.POST : offer.offer_form = OfferForm( instance=offer, data = request.POST, @@ -281,16 +281,6 @@ def manage_campaign(request, id, ebf=None, action='manage'): new_premium_form = CustomPremiumForm(data={'campaign': campaign}) activetab = '#2' else: - if action == 'makemobi': - try: - ebookfile = get_object_or_404(models.EbookFile, id=ebf) - except ValueError: - raise Http404 - - tasks.make_mobi.delay(ebookfile) - return HttpResponseRedirect(reverse('mademobi', args=[campaign.id])) - elif action == 'mademobi': - alerts.append('A MOBI file is being generated') form = ManageCampaignForm( instance=campaign, initial={'work_description':campaign.work.description} diff --git a/libraryauth/__init__.py b/libraryauth/__init__.py index 0b8d24204..06ad287f7 100644 --- a/libraryauth/__init__.py +++ b/libraryauth/__init__.py @@ -6,4 +6,4 @@ class LibraryAuthConfig(AppConfig): name = 'regluit.libraryauth' def ready(self): - from . import signals \ No newline at end of file + from . import signals diff --git a/libraryauth/admin.py b/libraryauth/admin.py index 11eb8217f..65101187a 100644 --- a/libraryauth/admin.py +++ b/libraryauth/admin.py @@ -1,12 +1,12 @@ -from . import models +from django import forms +from django.contrib.admin import ModelAdmin, site, register +from django.contrib.auth.models import User -from selectable.forms import AutoCompleteSelectWidget,AutoCompleteSelectField from selectable.base import ModelLookup +from selectable.forms import AutoCompleteSelectWidget, AutoCompleteSelectField from selectable.registry import registry -from django import forms -from django.contrib.admin import ModelAdmin, site -from django.contrib.auth.models import User, Group +from . 
import models class UserLookup(ModelLookup): model = User @@ -16,39 +16,41 @@ class UserLookup(ModelLookup): class LibraryAdminForm(forms.ModelForm): user = AutoCompleteSelectField( - UserLookup, - widget=AutoCompleteSelectWidget(UserLookup), - required=True, - ) + UserLookup, + widget=AutoCompleteSelectWidget(UserLookup), + required=True, + ) owner = AutoCompleteSelectField( - UserLookup, - widget=AutoCompleteSelectWidget(UserLookup), - required=True, - ) + UserLookup, + widget=AutoCompleteSelectWidget(UserLookup), + required=True, + ) class Meta(object): model = models.Library - widgets= {'group':forms.HiddenInput} + widgets = {'group':forms.HiddenInput} exclude = ('group', ) - - + +@register(models.Library) class LibraryAdmin(ModelAdmin): list_display = ('user', ) form = LibraryAdminForm search_fields = ['user__username'] +@register(models.Block) class BlockAdmin(ModelAdmin): - list_display = ('library', 'lower', 'upper',) + list_display = ('library', 'lower_IP', 'upper_IP',) search_fields = ('library__name', 'lower', 'upper',) +@register(models.CardPattern) class CardPatternAdmin(ModelAdmin): list_display = ('library', 'pattern', 'checksum',) search_fields = ('library__name', ) +@register(models.EmailPattern) class EmailPatternAdmin(ModelAdmin): list_display = ('library', 'pattern', ) search_fields = ('library__name',) -site.register(models.Library, LibraryAdmin) -site.register(models.Block, BlockAdmin) -site.register(models.CardPattern, CardPatternAdmin) -site.register(models.EmailPattern, EmailPatternAdmin) \ No newline at end of file +@register(models.BadUsernamePattern) +class BadUsernamePatternAdmin(ModelAdmin): + list_display = ('pattern', 'last') diff --git a/libraryauth/auth.py b/libraryauth/auth.py index 3876fd49d..24780d426 100644 --- a/libraryauth/auth.py +++ b/libraryauth/auth.py @@ -1,71 +1,51 @@ import logging -from django.http import HttpResponse +import requests + from django.shortcuts import redirect from django.utils.http import urlquote -from
social.pipeline.social_auth import associate_by_email -from social.apps.django_app.default.models import UserSocialAuth -from social.apps.django_app.middleware import SocialAuthExceptionMiddleware -from social.exceptions import (AuthAlreadyAssociated,SocialAuthBaseException) -from social.utils import social_logger +from django.core.files.base import ContentFile +from django.core.files.storage import default_storage +from social_core.pipeline.social_auth import associate_by_email +from social_core.exceptions import (AuthAlreadyAssociated, SocialAuthBaseException) +from social_django.middleware import SocialAuthExceptionMiddleware ANONYMOUS_AVATAR = '/static/images/header/avatar.png' -(NO_AVATAR, GRAVATAR, TWITTER, FACEBOOK, PRIVATETAR) = (0, 1, 2, 3, 4) -AVATARS = (NO_AVATAR, GRAVATAR, TWITTER, FACEBOOK, PRIVATETAR) +(NO_AVATAR, GRAVATAR, TWITTER, PRIVATETAR) = (0, 1, 2, 4) +AVATARS = (NO_AVATAR, GRAVATAR, TWITTER, PRIVATETAR) + logger = logging.getLogger(__name__) +def pic_storage_url(user, backend, url): + pic_file_name = '/pic/{}/{}'.format(backend, user) + # download cover image to cover_file + try: + r = requests.get(url) + pic_file = ContentFile(r.content) + content_type = r.headers.get('content-type', '') + if u'text' in content_type: + logger.warning('Cover return text for pic_url={}'.format(url)) + return None + pic_file.content_type = content_type + default_storage.save(pic_file_name, pic_file) + return default_storage.url(pic_file_name) + except Exception as e: + # if there is a problem, return None for cover URL + logger.warning('Failed to store cover for username={}'.format(user)) + return None + def selectively_associate_by_email(backend, details, user=None, *args, **kwargs): - """ - Associate current auth with a user with the same email address in the DB. 
- This pipeline entry is not 100% secure unless you know that the providers - enabled enforce email verification on their side, otherwise a user can - attempt to take over another user account by using the same (not validated) - email address on some provider. - - Not using Facebook or Twitter to authenticate a user. - """ - if backend.name in ('twitter', 'facebook'): - return None - return associate_by_email(backend, details, user=None, *args, **kwargs) + return associate_by_email(backend, details, user=None, *args, **kwargs) -def facebook_extra_values( user, extra_data): - try: - facebook_id = extra_data.get('id') - user.profile.facebook_id = facebook_id - if user.profile.avatar_source is None or user.profile.avatar_source is PRIVATETAR: - user.profile.avatar_source = FACEBOOK - user.profile.save() - return True - except Exception,e: - logger.error(e) - return False - -def twitter_extra_values( user, extra_data): - try: - twitter_id = extra_data.get('screen_name') - profile_image_url = extra_data.get('profile_image_url_https') - user.profile.twitter_id = twitter_id - if user.profile.avatar_source is None or user.profile.avatar_source in (TWITTER, PRIVATETAR): - user.profile.pic_url = profile_image_url - if user.profile.avatar_source is None or user.profile.avatar_source is PRIVATETAR: - user.profile.avatar_source = TWITTER - user.profile.save() - return True - except Exception,e: - logger.error(e) - return False - -def deliver_extra_data(backend, user, social, *args, **kwargs): - if backend.name is 'twitter': - twitter_extra_values( user, social.extra_data) - if backend.name is 'facebook': - facebook_extra_values( user, social.extra_data) + +def deliver_extra_data(backend, user, social, response, *args, **kwargs): + pass # following is needed because of length limitations in a unique constrain for MySQL def chop_username(username, *args, **kwargs): - if username and len(username)>222: + if username and len(username) > 222: return {'username':username[0:222]} def 
selective_social_user(backend, uid, user=None, *args, **kwargs): @@ -73,9 +53,8 @@ def selective_social_user(backend, uid, user=None, *args, **kwargs): social = backend.strategy.storage.user.get_social_auth(provider, uid) if social: if user and social.user != user: - if backend.name not in ('twitter', 'facebook'): - msg = 'This {0} account is already in use.'.format(provider) - raise AuthAlreadyAssociated(backend, msg) + msg = 'This {0} account is already in use.'.format(provider) + raise AuthAlreadyAssociated(backend, msg) elif not user: user = social.user return {'social': social, @@ -90,15 +69,15 @@ class SocialAuthExceptionMiddlewareWithoutMessages(SocialAuthExceptionMiddleware """ a modification of SocialAuthExceptionMiddleware to pass backend and message without attempting django.messages - """ + """ def process_exception(self, request, exception): - + if isinstance(exception, SocialAuthBaseException): backend = getattr(request, 'backend', None) backend_name = getattr(backend, 'name', 'unknown-backend') message = self.get_message(request, exception) - social_logger.error(message) + logger.warning(message) url = self.get_redirect_uri(request, exception) url += ('?' in url and '&' or '?') + \ diff --git a/libraryauth/backends.py b/libraryauth/backends.py index 028a3bab5..4c0adfe2d 100644 --- a/libraryauth/backends.py +++ b/libraryauth/backends.py @@ -2,12 +2,16 @@ to make a backend named you need to... 1. make a class 2. with a function authenticate(self, request, library) - returns true if can request.user can be authenticated to the library, and attaches a credential property to the library object + returns true if can request.user can be authenticated to the library, + and attaches a credential property to the library object returns fals if otherwise. 3. with a class authenticator - with a process((self, authenticator, success_url, deny_url) method which is expected to return a response -4. 
make a libraryauth/_join.html template (authenticator will be in its context) to insert a link or form for a user to join the library -5. if you need to show the user a form, define a model form class form with init method __init__(self, request, library, *args, **kwargs) + with a process((self, authenticator, success_url, deny_url) method + which is expected to return a response +4. make a libraryauth/_join.html template (authenticator will be in its context) + to insert a link or form for a user to join the library +5. if you need to show the user a form, define a model form class form with init method + __init__(self, request, library, *args, **kwargs) and model LibraryUser 6. define an admin form to let the library configure its authentication 7. add new auth choice to Library.backend choices and the admin as desired @@ -20,75 +24,78 @@ from django.shortcuts import render from .models import Block, IP, LibraryUser, CardPattern, EmailPattern - + logger = logging.getLogger(__name__) class ip: - def authenticate(self,request, library): + def authenticate(self, request, library): try: ip = IP(request.META['REMOTE_ADDR']) blocks = Block.objects.filter(Q(lower=ip) | Q(lower__lte=ip, upper__gte=ip)) for block in blocks: - if block.library==library: + if block.library == library: logger.info('%s authenticated for %s from %s'%(request.user, library, ip)) - library.credential=ip + library.credential = ip return True return False except KeyError: return False - + class authenticator(): def process(self, caller, success_url, deny_url): return HttpResponseRedirect(deny_url) - + form = None - + class admin_form(forms.ModelForm): class Meta: model = Block exclude = ("library",) -class cardnum: - def authenticate(self,request, library): +class cardnum: + def authenticate(self, request, library): return False class authenticator(): def process(self, caller, success_url, deny_url): - if caller.form and caller.request.method=='POST' and caller.form.is_valid(): + if caller.form 
and caller.request.method == 'POST' and caller.form.is_valid(): library = caller.form.cleaned_data['library'] library.credential = caller.form.cleaned_data['credential'] - logger.info('%s authenticated for %s from %s'%(caller.request.user, caller.library, caller.form.cleaned_data.get('number'))) + logger.info('%s authenticated for %s from %s' % ( + caller.request.user, + caller.library, + caller.form.cleaned_data.get('number'), + )) library.add_user(caller.form.cleaned_data['user']) return HttpResponseRedirect(success_url) - else: - return render(caller.request, 'libraryauth/library.html', { - 'library':caller.library, - 'authenticator':caller, - }) + return render(caller.request, 'libraryauth/library.html', { + 'library':caller.library, + 'authenticator':caller, + }) class admin_form(forms.ModelForm): class Meta: model = CardPattern exclude = ("library",) - + class form(forms.ModelForm): credential = forms.RegexField( - label="Enter Your Library Card Number", - max_length=20, - regex=r'^\d+$', - required = True, - help_text = "(digits only)", - error_messages = {'invalid': "digits only!",} - ) + label="Enter Your Library Card Number", + max_length=20, + regex=r'^\d+$', + required=True, + help_text="(digits only)", + error_messages={'invalid': "digits only!",} + ) def __init__(self, request, library, *args, **kwargs): - if request.method=="POST": - data=request.POST + if request.method == "POST": + data = request.POST super(cardnum.form, self).__init__(data=data) else: - initial={'user':request.user, 'library':library} + initial = {'user':request.user, 'library':library} super(cardnum.form, self).__init__(initial=initial) - + def clean(self): library = self.cleaned_data.get('library', None) credential = self.cleaned_data.get('credential', '') @@ -96,23 +103,23 @@ def clean(self): if card_pattern.is_valid(credential): return self.cleaned_data raise forms.ValidationError("the library card number must be VALID.") - + class Meta: model = LibraryUser - widgets = { 
'library': forms.HiddenInput, 'user': forms.HiddenInput } + widgets = {'library': forms.HiddenInput, 'user': forms.HiddenInput} exclude = () -class email: - def authenticate(self,request, library): - if request.user.is_anonymous(): +class email: + def authenticate(self, request, library): + if request.user.is_anonymous: return False email = request.user.email for email_pattern in library.email_auths.all(): if email_pattern.is_valid(email): logger.info('%s authenticated for %s from %s'%(request.user, library, email)) - library.credential=email + library.credential = email return True return False - + class authenticator(): def process(self, caller, success_url, deny_url): return HttpResponseRedirect(deny_url) diff --git a/libraryauth/emailcheck/data.py b/libraryauth/emailcheck/data.py index a0b841e1d..2d7eea303 100644 --- a/libraryauth/emailcheck/data.py +++ b/libraryauth/emailcheck/data.py @@ -1,709 +1,709 @@ blacklist = frozenset([ -'0-mail.com', -'0815.ru', -'0845.ru', -'0clickemail.com', -'0wnd.net', -'0wnd.org', -'10minutemail.com', -'10minutemail.net', -'12houremail.com', -'12minutemail.com', -'163.com', -'1pad.de', -'20minutemail.com', -'2prong.com', -'30minutemail.com', -'3d-painting.com', -'4warding.com', -'4warding.net', -'4warding.org', -'60minutemail.com', -'675hosting.com', -'675hosting.net', -'675hosting.org', -'6url.com', -'75hosting.com', -'75hosting.net', -'75hosting.org', -'7tags.com', -'8127ep.com', -'9ox.net', -'a-bc.net', -'afrobacon.com', -'agedmail.com', -'ajaxapp.net', -'akapost.com', -'akerd.com', -'ama-trade.de', -'ama-trans.de', -'amilegit.com', -'amiri.net', -'amiriindustries.com', -'ano-mail.net', -'anon-mail.de', -'anonbox.net', -'anonmails.de', -'anonymbox.com', -'antichef.com', -'antichef.net', -'antireg.ru', -'antispam.de', -'antispam24.de', -'antispammail.de', -'armyspy.com', -'asdasd.ru', -'b2cmail.de', -'baxomale.ht.cx', -'beefmilk.com', -'binkmail.com', -'bio-muesli.info', -'bio-muesli.net', -'blackmarket.to', -'bobmail.info', 
-'bodhi.lawlita.com', -'bofthew.com', -'bootybay.de', -'br.mintemail.com', -'breakthru.com', -'brefmail.com', -'brennendesreich.de', -'broadbandninja.com', -'bsnow.net', -'bspamfree.org', -'buffemail.com', -'bugmenever.com', -'bugmenot.com', -'bumpymail.com', -'bund.us', -'byom.de', -'cam4you.cc', -'card.zp.ua', -'casualdx.com', -'cellurl.com', -'centermail.com', -'centermail.net', -'cheatmail.de', -'chogmail.com', -'choicemail1.com', -'consumerriot.com', -'cool.fr.nf', -'correo.blogos.net', -'cosmorph.com', -'courriel.fr.nf', -'courrieltemporaire.com', -'cubiclink.com', -'curryworld.de', -'cust.in', -'cuvox.de', -'dacoolest.com', -'dandikmail.com', -'dayrep.com', -'dbunker.com', -'deadaddress.com', -'deadspam.com', -'dealja.com', -'delikkt.de', -'despam.it', -'despammed.com', -'devnullmail.com', -'dfgh.net', -'digitalsanctuary.com', -'dingbone.com', -'discardmail.com', -'discardmail.de', -'disposableaddress.com', -'disposableemailaddresses:emailmiser.com', -'disposeamail.com', -'disposemail.com', -'dispostable.com', -'dm.w3internet.co.ukexample.com', -'dodgeit.com', -'dodgit.com', -'dodgit.org', -'donemail.ru', -'dontreg.com', -'dontsendmespam.de', -'dotman.de', -'dropcake.de', -'dudmail.com', -'dump-email.info', -'dumpandjunk.com', -'dumpmail.de', -'dumpyemail.com', -'duskmail.com', -'e4ward.com', -'easytrashmail.com', -'edv.to', -'einmalmail.de', -'einrot.com', -'eintagsmail.de', -'email60.com', -'emaildienst.de', -'emailgo.de', -'emailias.com', -'emailigo.de', -'emailinfive.com', -'emaillime.com', -'emailmiser.com', -'emailsensei.com', -'emailtemporanea.com', -'emailtemporanea.net', -'emailtemporario.com.br', -'emailto.de', -'emailwarden.com', -'emailx.at.hm', -'emailxfer.com', -'emz.net', -'enterto.com', -'ephemail.net', -'ero-tube.org', -'etranquil.com', -'etranquil.net', -'etranquil.org', -'explodemail.com', -'express.net.ua', -'eyepaste.com', -'fakedemail.com', -'fakeinbox.com', -'fakeinformation.com', -'fakemail.fr', -'fakemailgenerator.com', 
-'fansworldwide.de', -'fastacura.com', -'fastchevy.com', -'fastchrysler.com', -'fastkawasaki.com', -'fastmazda.com', -'fastmitsubishi.com', -'fastnissan.com', -'fastsubaru.com', -'fastsuzuki.com', -'fasttoyota.com', -'fastyamaha.com', -'film-blog.biz', -'filzmail.com', -'fivemail.de', -'fizmail.com', -'fly-ts.de', -'flyspam.com', -'fr33mail.info', -'frapmail.com', -'front14.org', -'fudgerub.com', -'fux0ringduh.com', -'fyii.de', -'garbagemail.org', -'garliclife.com', -'gehensiemirnichtaufdensack.de', -'geschent.biz', -'get1mail.com', -'get2mail.fr', -'getairmail.com', -'getmails.eu', -'getonemail.com', -'getonemail.net', -'ghosttexter.de', -'giantmail.de', -'girlsundertheinfluence.com', -'gishpuppy.com', -'gmal.com', -'gmial.com', -'gomail.in', -'gowikibooks.com', -'gowikicampus.com', -'gowikicars.com', -'gowikifilms.com', -'gowikigames.com', -'gowikimusic.com', -'gowikinetwork.com', -'gowikitravel.com', -'gowikitv.com', -'great-host.in', -'greensloth.com', -'gsrv.co.uk', -'guerillamail.biz', -'guerillamail.com', -'guerillamail.net', -'guerillamail.org', -'guerrillamail.biz', -'guerrillamail.com', -'guerrillamail.de', -'guerrillamail.info', -'guerrillamail.net', -'guerrillamail.org', -'guerrillamailblock.com', -'h.mintemail.com', -'h8s.org', -'haltospam.com', -'hat-geld.de', -'hatespam.org', -'hidemail.de', -'hmamail.com', -'hochsitze.com', -'hotmai.com', -'hotmial.com', -'hotpop.com', -'hulapla.de', -'humaility.com', -'ieatspam.eu', -'ieatspam.info', -'ieh-mail.de', -'ignoremail.com', -'ihateyoualot.info', -'iheartspam.org', -'ikbenspamvrij.nl', -'imails.info', -'inboxclean.com', -'inboxclean.org', -'inboxed.im', -'inboxed.pw', -'incognitomail.com', -'incognitomail.net', -'incognitomail.org', -'infocom.zp.ua', -'insorg-mail.info', -'instant-mail.de', -'ip6.li', -'ipoo.org', -'irish2me.com', -'is.af', -'iwi.net', -'jetable.com', -'jetable.fr.nf', -'jetable.net', -'jetable.org', -'jnxjn.com', -'junk.to', -'junk1e.com', -'kasmail.com', -'kaspop.com', 
-'keepmymail.com', -'killmail.com', -'killmail.net', -'kir.ch.tc', -'klassmaster.com', -'klassmaster.net', -'klzlk.com', -'kostenlosemailadresse.de', -'koszmail.pl', -'kulturbetrieb.info', -'kurzepost.de', -'lawlita.com', -'letthemeatspam.com', -'lhsdv.com', -'lifebyfood.com', -'link2mail.net', -'linuxmail.so', -'litedrop.com', -'llogin.ru', -'lol.ovpn.to', -'lolfreak.net', -'lookugly.com', -'lopl.co.cc', -'lortemail.dk', -'losemymail.com', -'lr78.com', -'luckymail.org', -'m21.cc', -'m4ilweb.info', -'maboard.com', -'mail-temporaire.fr', -'mail.by', -'mail.mezimages.net', -'mail.zp.ua', -'mail1a.de', -'mail21.cc', -'mail2rss.org', -'mail333.com', -'mail4trash.com', -'mailbidon.com', -'mailbiz.biz', -'mailblocks.com', -'mailcatch.com', -'mailde.de', -'mailde.info', -'maildrop.cc', -'maileater.com', -'maileimer.de', -'mailexpire.com', -'mailforspam.com', -'mailfreeonline.com', -'mailin8r.com', -'mailinater.com', -'mailinator.com', -'mailinator.net', -'mailinator2.com', -'mailincubator.com', -'mailita.tk', -'mailme.ir', -'mailme.lv', -'mailme24.com', -'mailmetrash.com', -'mailmoat.com', -'mailms.com', -'mailnator.com', -'mailnesia.com', -'mailnull.com', -'mailorg.org', -'mailscrap.com', -'mailseal.de', -'mailshell.com', -'mailsiphon.com', -'mailslite.com', -'mailtome.de', -'mailtrash.net', -'mailtv.net', -'mailtv.tv', -'mailzilla.com', -'mailzilla.org', -'makemetheking.com', -'malahov.de', -'mbx.cc', -'mega.zik.dj', -'meinspamschutz.de', -'meltmail.com', -'messagebeamer.de', -'mierdamail.com', -'ministry-of-silly-walks.de', -'mintemail.com', -'misterpinball.de', -'moburl.com', -'moncourrier.fr.nf', -'monemail.fr.nf', -'monmail.fr.nf', -'msa.minsmail.com', -'mt2009.com', -'mt2014.com', -'mx0.wwwnew.eu', -'mycard.net.ua', -'mycleaninbox.net', -'mypartyclip.de', -'myphantomemail.com', -'mysamp.de', -'myspaceinc.com', -'myspaceinc.net', -'myspaceinc.org', -'myspacepimpedup.com', -'myspamless.com', -'mytempmail.com', -'mytrashmail.com', -'nabuma.com', -'neomailbox.com', 
-'nepwk.com', -'nervmich.net', -'nervtmich.net', -'netmails.com', -'netmails.net', -'netzidiot.de', -'neverbox.com', -'nevermail.de', -'nincsmail.hu', -'no-spam.ws', -'nobugmail.com', -'nobulk.com', -'nobuma.com', -'noclickemail.com', -'nogmailspam.info', -'nomail.pw', -'nomail.xl.cx', -'nomail2me.com', -'nomorespamemails.com', -'nospam.ze.tc', -'nospam4.us', -'nospamfor.us', -'nospammail.net', -'nospamthanks.info', -'notmailinator.com', -'nowmymail.com', -'nurfuerspam.de', -'nus.edu.sg', -'nwldx.com', -'objectmail.com', -'obobbo.com', -'odnorazovoe.ru', -'ohaaa.de', -'omail.pro', -'oneoffemail.com', -'oneoffmail.com', -'onewaymail.com', -'onlatedotcom.info', -'online.ms', -'oopi.org', -'ordinaryamerican.net', -'otherinbox.com', -'ourklips.com', -'outlawspam.com', -'ovpn.to', -'owlpic.com', -'pancakemail.com', -'pimpedupmyspace.com', -'pjjkp.com', -'plexolan.de', -'politikerclub.de', -'poofy.org', -'pookmail.com', -'powered.name', -'privacy.net', -'privatdemail.net', -'privy-mail.de', -'privymail.de', -'proxymail.eu', -'prtnx.com', -'punkass.com', -'put2.net', -'putthisinyourspamdatabase.com', -'quickinbox.com', -'rcpt.at', -'realtyalerts.ca', -'receiveee.com', -'recode.me', -'recursor.net', -'regbypass.com', -'regbypass.comsafe-mail.net', -'rejectmail.com', -'rhyta.com', -'rklips.com', -'rmqkr.net', -'rppkn.com', -'rtrtr.com', -'s0ny.net', -'safe-mail.net', -'safersignup.de', -'safetymail.info', -'safetypost.de', -'sandelf.de', -'saynotospams.com', -'schafmail.de', -'schmeissweg.tk', -'schrott-email.de', -'secmail.pw', -'secretemail.de', -'secure-mail.biz', -'secure-mail.cc', -'selfdestructingmail.com', -'sendspamhere.com', -'senseless-entertainment.com', -'server.ms', -'sharklasers.com', -'shieldemail.com', -'shiftmail.com', -'shitmail.me', -'shortmail.net', -'shut.name', -'shut.ws', -'sibmail.com', -'sinnlos-mail.de', -'skeefmail.com', -'sky-ts.de', -'slaskpost.se', -'slopsbox.com', -'smashmail.de', -'smellfear.com', -'snakemail.com', -'sneakemail.com', 
-'sneakmail.de', -'snkmail.com', -'sofimail.com', -'sofort-mail.de', -'sofortmail.de', -'sogetthis.com', -'soodonims.com', -'spam.la', -'spam.su', -'spam4.me', -'spamail.de', -'spamavert.com', -'spambob.com', -'spambob.net', -'spambob.org', -'spambog.com', -'spambog.de', -'spambog.ru', -'spambox.info', -'spambox.irishspringrealty.com', -'spambox.us', -'spamcannon.com', -'spamcannon.net', -'spamcero.com', -'spamcon.org', -'spamcorptastic.com', -'spamcowboy.com', -'spamcowboy.net', -'spamcowboy.org', -'spamday.com', -'spamex.com', -'spamfree.eu', -'spamfree24.com', -'spamfree24.de', -'spamfree24.eu', -'spamfree24.info', -'spamfree24.net', -'spamfree24.org', -'spamgourmet.com', -'spamgourmet.net', -'spamgourmet.org', -'spamherelots.com', -'spamhereplease.com', -'spamhole.com', -'spamify.com', -'spaminator.de', -'spamkill.info', -'spaml.com', -'spaml.de', -'spammotel.com', -'spamobox.com', -'spamoff.de', -'spamslicer.com', -'spamspot.com', -'spamthis.co.uk', -'spamthisplease.com', -'spamtrail.com', -'speed.1s.fr', -'spoofmail.de', -'squizzy.de', -'sry.li', -'stinkefinger.net', -'stuffmail.de', -'super-auswahl.de', -'supergreatmail.com', -'supermailer.jp', -'superstachel.de', -'suremail.info', -'tagyourself.com', -'teewars.org', -'teleworm.com', -'teleworm.us', -'temp-mail.org', -'temp-mail.ru', -'tempail.com', -'tempalias.com', -'tempe-mail.com', -'tempemail.biz', -'tempemail.co.za', -'tempemail.com', -'tempemail.net', -'tempinbox.co.uk', -'tempinbox.com', -'tempmail.eu', -'tempmail.it', -'tempmail2.com', -'tempmailer.com', -'tempmailer.de', -'tempomail.fr', -'temporarily.de', -'temporarioemail.com.br', -'temporaryemail.net', -'temporaryforwarding.com', -'temporaryinbox.com', -'temporarymailaddress.com', -'thanksnospam.info', -'thankyou2010.com', -'thc.st', -'thisisnotmyrealemail.com', -'thismail.net', -'throwawayemailaddress.com', -'tilien.com', -'tittbit.in', -'tmailinator.com', -'tokem.co', -'topranklist.de', -'tormail.org', -'tradermail.info', -'trash-amil.com', 
-'trash-mail.at', -'trash-mail.com', -'trash-mail.de', -'trash2009.com', -'trashdevil.com', -'trashdevil.de', -'trashemail.de', -'trashinbox.com', -'trashmail.at', -'trashmail.com', -'trashmail.de', -'trashmail.me', -'trashmail.net', -'trashmail.org', -'trashmail.ws', -'trashmailer.com', -'trashymail.com', -'trashymail.net', -'trialmail.de', -'trillianpro.com', -'turual.com', -'twinmail.de', -'tyldd.com', -'uggsrock.com', -'upliftnow.com', -'uplipht.com', -'us.af', -'venompen.com', -'veryrealemail.com', -'vidchart.com', -'viditag.com', -'viewcastmedia.com', -'viewcastmedia.net', -'viewcastmedia.org', -'vipmail.name', -'vipmail.pw', -'vpn.st', -'vsimcard.com', -'wasteland.rfc822.org', -'watch-harry-potter.com', -'watchfull.net', -'webm4il.info', -'weg-werf-email.de', -'wegwerf-email-adressen.de', -'wegwerf-email.de', -'wegwerf-email.net', -'wegwerf-emails.de', -'wegwerfadresse.de', -'wegwerfemail.com', -'wegwerfemail.de', -'wegwerfemail.net', -'wegwerfemail.org', -'wegwerfemailadresse.com', -'wegwerfmail.de', -'wegwerfmail.net', -'wegwerfmail.org', -'wetrainbayarea.com', -'wetrainbayarea.org', -'wh4f.org', -'whatpaas.com', -'whyspam.me', -'willhackforfood.biz', -'willselfdestruct.com', -'winemaven.info', -'wolfsmail.tk', -'writeme.us', -'wronghead.com', -'wuzup.net', -'wuzupmail.net', -'www.e4ward.com', -'www.gishpuppy.com', -'www.mailinator.com', -'wwwnew.eu', -'x.ip6.li', -'xagloo.com', -'xemaps.com', -'xents.com', -'xmaily.com', -'xoxy.net', -'yanet.me', -'yep.it', -'yogamaven.com', -'yopmail.com', -'yopmail.fr', -'yopmail.net', -'youmailr.com', -'ypmail.webarnak.fr.eu.org', -'yuurok.com', -'yxzx.net', -'z1p.biz', -'zehnminuten.de', -'zehnminutenmail.de', -'zippymail.info', -'zoaxe.com', -'zoemail.org', + '0-mail.com', + '0815.ru', + '0845.ru', + '0clickemail.com', + '0wnd.net', + '0wnd.org', + '10minutemail.com', + '10minutemail.net', + '12houremail.com', + '12minutemail.com', + '163.com', + '1pad.de', + '20minutemail.com', + '2prong.com', + '30minutemail.com', 
+ '3d-painting.com', + '4warding.com', + '4warding.net', + '4warding.org', + '60minutemail.com', + '675hosting.com', + '675hosting.net', + '675hosting.org', + '6url.com', + '75hosting.com', + '75hosting.net', + '75hosting.org', + '7tags.com', + '8127ep.com', + '9ox.net', + 'a-bc.net', + 'afrobacon.com', + 'agedmail.com', + 'ajaxapp.net', + 'akapost.com', + 'akerd.com', + 'ama-trade.de', + 'ama-trans.de', + 'amilegit.com', + 'amiri.net', + 'amiriindustries.com', + 'ano-mail.net', + 'anon-mail.de', + 'anonbox.net', + 'anonmails.de', + 'anonymbox.com', + 'antichef.com', + 'antichef.net', + 'antireg.ru', + 'antispam.de', + 'antispam24.de', + 'antispammail.de', + 'armyspy.com', + 'asdasd.ru', + 'b2cmail.de', + 'baxomale.ht.cx', + 'beefmilk.com', + 'binkmail.com', + 'bio-muesli.info', + 'bio-muesli.net', + 'blackmarket.to', + 'bobmail.info', + 'bodhi.lawlita.com', + 'bofthew.com', + 'bootybay.de', + 'br.mintemail.com', + 'breakthru.com', + 'brefmail.com', + 'brennendesreich.de', + 'broadbandninja.com', + 'bsnow.net', + 'bspamfree.org', + 'buffemail.com', + 'bugmenever.com', + 'bugmenot.com', + 'bumpymail.com', + 'bund.us', + 'byom.de', + 'cam4you.cc', + 'card.zp.ua', + 'casualdx.com', + 'cellurl.com', + 'centermail.com', + 'centermail.net', + 'cheatmail.de', + 'chogmail.com', + 'choicemail1.com', + 'consumerriot.com', + 'cool.fr.nf', + 'correo.blogos.net', + 'cosmorph.com', + 'courriel.fr.nf', + 'courrieltemporaire.com', + 'cubiclink.com', + 'curryworld.de', + 'cust.in', + 'cuvox.de', + 'dacoolest.com', + 'dandikmail.com', + 'dayrep.com', + 'dbunker.com', + 'deadaddress.com', + 'deadspam.com', + 'dealja.com', + 'delikkt.de', + 'despam.it', + 'despammed.com', + 'devnullmail.com', + 'dfgh.net', + 'digitalsanctuary.com', + 'dingbone.com', + 'discardmail.com', + 'discardmail.de', + 'disposableaddress.com', + 'disposableemailaddresses:emailmiser.com', + 'disposeamail.com', + 'disposemail.com', + 'dispostable.com', + 'dm.w3internet.co.ukexample.com', + 'dodgeit.com', + 
'dodgit.com', + 'dodgit.org', + 'donemail.ru', + 'dontreg.com', + 'dontsendmespam.de', + 'dotman.de', + 'dropcake.de', + 'dudmail.com', + 'dump-email.info', + 'dumpandjunk.com', + 'dumpmail.de', + 'dumpyemail.com', + 'duskmail.com', + 'e4ward.com', + 'easytrashmail.com', + 'edv.to', + 'einmalmail.de', + 'einrot.com', + 'eintagsmail.de', + 'email60.com', + 'emaildienst.de', + 'emailgo.de', + 'emailias.com', + 'emailigo.de', + 'emailinfive.com', + 'emaillime.com', + 'emailmiser.com', + 'emailsensei.com', + 'emailtemporanea.com', + 'emailtemporanea.net', + 'emailtemporario.com.br', + 'emailto.de', + 'emailwarden.com', + 'emailx.at.hm', + 'emailxfer.com', + 'emz.net', + 'enterto.com', + 'ephemail.net', + 'ero-tube.org', + 'etranquil.com', + 'etranquil.net', + 'etranquil.org', + 'explodemail.com', + 'express.net.ua', + 'eyepaste.com', + 'fakedemail.com', + 'fakeinbox.com', + 'fakeinformation.com', + 'fakemail.fr', + 'fakemailgenerator.com', + 'fansworldwide.de', + 'fastacura.com', + 'fastchevy.com', + 'fastchrysler.com', + 'fastkawasaki.com', + 'fastmazda.com', + 'fastmitsubishi.com', + 'fastnissan.com', + 'fastsubaru.com', + 'fastsuzuki.com', + 'fasttoyota.com', + 'fastyamaha.com', + 'film-blog.biz', + 'filzmail.com', + 'fivemail.de', + 'fizmail.com', + 'fly-ts.de', + 'flyspam.com', + 'fr33mail.info', + 'frapmail.com', + 'front14.org', + 'fudgerub.com', + 'fux0ringduh.com', + 'fyii.de', + 'garbagemail.org', + 'garliclife.com', + 'gehensiemirnichtaufdensack.de', + 'geschent.biz', + 'get1mail.com', + 'get2mail.fr', + 'getairmail.com', + 'getmails.eu', + 'getonemail.com', + 'getonemail.net', + 'ghosttexter.de', + 'giantmail.de', + 'girlsundertheinfluence.com', + 'gishpuppy.com', + 'gmal.com', + 'gmial.com', + 'gomail.in', + 'gowikibooks.com', + 'gowikicampus.com', + 'gowikicars.com', + 'gowikifilms.com', + 'gowikigames.com', + 'gowikimusic.com', + 'gowikinetwork.com', + 'gowikitravel.com', + 'gowikitv.com', + 'great-host.in', + 'greensloth.com', + 'gsrv.co.uk', + 
'guerillamail.biz', + 'guerillamail.com', + 'guerillamail.net', + 'guerillamail.org', + 'guerrillamail.biz', + 'guerrillamail.com', + 'guerrillamail.de', + 'guerrillamail.info', + 'guerrillamail.net', + 'guerrillamail.org', + 'guerrillamailblock.com', + 'h.mintemail.com', + 'h8s.org', + 'haltospam.com', + 'hat-geld.de', + 'hatespam.org', + 'hidemail.de', + 'hmamail.com', + 'hochsitze.com', + 'hotmai.com', + 'hotmial.com', + 'hotpop.com', + 'hulapla.de', + 'humaility.com', + 'ieatspam.eu', + 'ieatspam.info', + 'ieh-mail.de', + 'ignoremail.com', + 'ihateyoualot.info', + 'iheartspam.org', + 'ikbenspamvrij.nl', + 'imails.info', + 'inboxclean.com', + 'inboxclean.org', + 'inboxed.im', + 'inboxed.pw', + 'incognitomail.com', + 'incognitomail.net', + 'incognitomail.org', + 'infocom.zp.ua', + 'insorg-mail.info', + 'instant-mail.de', + 'ip6.li', + 'ipoo.org', + 'irish2me.com', + 'is.af', + 'iwi.net', + 'jetable.com', + 'jetable.fr.nf', + 'jetable.net', + 'jetable.org', + 'jnxjn.com', + 'junk.to', + 'junk1e.com', + 'kasmail.com', + 'kaspop.com', + 'keepmymail.com', + 'killmail.com', + 'killmail.net', + 'kir.ch.tc', + 'klassmaster.com', + 'klassmaster.net', + 'klzlk.com', + 'kostenlosemailadresse.de', + 'koszmail.pl', + 'kulturbetrieb.info', + 'kurzepost.de', + 'lawlita.com', + 'letthemeatspam.com', + 'lhsdv.com', + 'lifebyfood.com', + 'link2mail.net', + 'linuxmail.so', + 'litedrop.com', + 'llogin.ru', + 'lol.ovpn.to', + 'lolfreak.net', + 'lookugly.com', + 'lopl.co.cc', + 'lortemail.dk', + 'losemymail.com', + 'lr78.com', + 'luckymail.org', + 'm21.cc', + 'm4ilweb.info', + 'maboard.com', + 'mail-temporaire.fr', + 'mail.by', + 'mail.mezimages.net', + 'mail.zp.ua', + 'mail1a.de', + 'mail21.cc', + 'mail2rss.org', + 'mail333.com', + 'mail4trash.com', + 'mailbidon.com', + 'mailbiz.biz', + 'mailblocks.com', + 'mailcatch.com', + 'mailde.de', + 'mailde.info', + 'maildrop.cc', + 'maileater.com', + 'maileimer.de', + 'mailexpire.com', + 'mailforspam.com', + 'mailfreeonline.com', + 
'mailin8r.com', + 'mailinater.com', + 'mailinator.com', + 'mailinator.net', + 'mailinator2.com', + 'mailincubator.com', + 'mailita.tk', + 'mailme.ir', + 'mailme.lv', + 'mailme24.com', + 'mailmetrash.com', + 'mailmoat.com', + 'mailms.com', + 'mailnator.com', + 'mailnesia.com', + 'mailnull.com', + 'mailorg.org', + 'mailscrap.com', + 'mailseal.de', + 'mailshell.com', + 'mailsiphon.com', + 'mailslite.com', + 'mailtome.de', + 'mailtrash.net', + 'mailtv.net', + 'mailtv.tv', + 'mailzilla.com', + 'mailzilla.org', + 'makemetheking.com', + 'malahov.de', + 'mbx.cc', + 'mega.zik.dj', + 'meinspamschutz.de', + 'meltmail.com', + 'messagebeamer.de', + 'mierdamail.com', + 'ministry-of-silly-walks.de', + 'mintemail.com', + 'misterpinball.de', + 'moburl.com', + 'moncourrier.fr.nf', + 'monemail.fr.nf', + 'monmail.fr.nf', + 'msa.minsmail.com', + 'mt2009.com', + 'mt2014.com', + 'mx0.wwwnew.eu', + 'mycard.net.ua', + 'mycleaninbox.net', + 'mypartyclip.de', + 'myphantomemail.com', + 'mysamp.de', + 'myspaceinc.com', + 'myspaceinc.net', + 'myspaceinc.org', + 'myspacepimpedup.com', + 'myspamless.com', + 'mytempmail.com', + 'mytrashmail.com', + 'nabuma.com', + 'neomailbox.com', + 'nepwk.com', + 'nervmich.net', + 'nervtmich.net', + 'netmails.com', + 'netmails.net', + 'netzidiot.de', + 'neverbox.com', + 'nevermail.de', + 'nincsmail.hu', + 'no-spam.ws', + 'nobugmail.com', + 'nobulk.com', + 'nobuma.com', + 'noclickemail.com', + 'nogmailspam.info', + 'nomail.pw', + 'nomail.xl.cx', + 'nomail2me.com', + 'nomorespamemails.com', + 'nospam.ze.tc', + 'nospam4.us', + 'nospamfor.us', + 'nospammail.net', + 'nospamthanks.info', + 'notmailinator.com', + 'nowmymail.com', + 'nurfuerspam.de', + 'nus.edu.sg', + 'nwldx.com', + 'objectmail.com', + 'obobbo.com', + 'odnorazovoe.ru', + 'ohaaa.de', + 'omail.pro', + 'oneoffemail.com', + 'oneoffmail.com', + 'onewaymail.com', + 'onlatedotcom.info', + 'online.ms', + 'oopi.org', + 'ordinaryamerican.net', + 'otherinbox.com', + 'ourklips.com', + 'outlawspam.com', + 'ovpn.to', 
+ 'owlpic.com', + 'pancakemail.com', + 'pimpedupmyspace.com', + 'pjjkp.com', + 'plexolan.de', + 'politikerclub.de', + 'poofy.org', + 'pookmail.com', + 'powered.name', + 'privacy.net', + 'privatdemail.net', + 'privy-mail.de', + 'privymail.de', + 'proxymail.eu', + 'prtnx.com', + 'punkass.com', + 'put2.net', + 'putthisinyourspamdatabase.com', + 'quickinbox.com', + 'rcpt.at', + 'realtyalerts.ca', + 'receiveee.com', + 'recode.me', + 'recursor.net', + 'regbypass.com', + 'regbypass.comsafe-mail.net', + 'rejectmail.com', + 'rhyta.com', + 'rklips.com', + 'rmqkr.net', + 'rppkn.com', + 'rtrtr.com', + 's0ny.net', + 'safe-mail.net', + 'safersignup.de', + 'safetymail.info', + 'safetypost.de', + 'sandelf.de', + 'saynotospams.com', + 'schafmail.de', + 'schmeissweg.tk', + 'schrott-email.de', + 'secmail.pw', + 'secretemail.de', + 'secure-mail.biz', + 'secure-mail.cc', + 'selfdestructingmail.com', + 'sendspamhere.com', + 'senseless-entertainment.com', + 'server.ms', + 'sharklasers.com', + 'shieldemail.com', + 'shiftmail.com', + 'shitmail.me', + 'shortmail.net', + 'shut.name', + 'shut.ws', + 'sibmail.com', + 'sinnlos-mail.de', + 'skeefmail.com', + 'sky-ts.de', + 'slaskpost.se', + 'slopsbox.com', + 'smashmail.de', + 'smellfear.com', + 'snakemail.com', + 'sneakemail.com', + 'sneakmail.de', + 'snkmail.com', + 'sofimail.com', + 'sofort-mail.de', + 'sofortmail.de', + 'sogetthis.com', + 'soodonims.com', + 'spam.la', + 'spam.su', + 'spam4.me', + 'spamail.de', + 'spamavert.com', + 'spambob.com', + 'spambob.net', + 'spambob.org', + 'spambog.com', + 'spambog.de', + 'spambog.ru', + 'spambox.info', + 'spambox.irishspringrealty.com', + 'spambox.us', + 'spamcannon.com', + 'spamcannon.net', + 'spamcero.com', + 'spamcon.org', + 'spamcorptastic.com', + 'spamcowboy.com', + 'spamcowboy.net', + 'spamcowboy.org', + 'spamday.com', + 'spamex.com', + 'spamfree.eu', + 'spamfree24.com', + 'spamfree24.de', + 'spamfree24.eu', + 'spamfree24.info', + 'spamfree24.net', + 'spamfree24.org', + 'spamgourmet.com', + 
'spamgourmet.net', + 'spamgourmet.org', + 'spamherelots.com', + 'spamhereplease.com', + 'spamhole.com', + 'spamify.com', + 'spaminator.de', + 'spamkill.info', + 'spaml.com', + 'spaml.de', + 'spammotel.com', + 'spamobox.com', + 'spamoff.de', + 'spamslicer.com', + 'spamspot.com', + 'spamthis.co.uk', + 'spamthisplease.com', + 'spamtrail.com', + 'speed.1s.fr', + 'spoofmail.de', + 'squizzy.de', + 'sry.li', + 'stinkefinger.net', + 'stuffmail.de', + 'super-auswahl.de', + 'supergreatmail.com', + 'supermailer.jp', + 'superstachel.de', + 'suremail.info', + 'tagyourself.com', + 'teewars.org', + 'teleworm.com', + 'teleworm.us', + 'temp-mail.org', + 'temp-mail.ru', + 'tempail.com', + 'tempalias.com', + 'tempe-mail.com', + 'tempemail.biz', + 'tempemail.co.za', + 'tempemail.com', + 'tempemail.net', + 'tempinbox.co.uk', + 'tempinbox.com', + 'tempmail.eu', + 'tempmail.it', + 'tempmail2.com', + 'tempmailer.com', + 'tempmailer.de', + 'tempomail.fr', + 'temporarily.de', + 'temporarioemail.com.br', + 'temporaryemail.net', + 'temporaryforwarding.com', + 'temporaryinbox.com', + 'temporarymailaddress.com', + 'thanksnospam.info', + 'thankyou2010.com', + 'thc.st', + 'thisisnotmyrealemail.com', + 'thismail.net', + 'throwawayemailaddress.com', + 'tilien.com', + 'tittbit.in', + 'tmailinator.com', + 'tokem.co', + 'topranklist.de', + 'tormail.org', + 'tradermail.info', + 'trash-amil.com', + 'trash-mail.at', + 'trash-mail.com', + 'trash-mail.de', + 'trash2009.com', + 'trashdevil.com', + 'trashdevil.de', + 'trashemail.de', + 'trashinbox.com', + 'trashmail.at', + 'trashmail.com', + 'trashmail.de', + 'trashmail.me', + 'trashmail.net', + 'trashmail.org', + 'trashmail.ws', + 'trashmailer.com', + 'trashymail.com', + 'trashymail.net', + 'trialmail.de', + 'trillianpro.com', + 'turual.com', + 'twinmail.de', + 'tyldd.com', + 'uggsrock.com', + 'upliftnow.com', + 'uplipht.com', + 'us.af', + 'venompen.com', + 'veryrealemail.com', + 'vidchart.com', + 'viditag.com', + 'viewcastmedia.com', + 'viewcastmedia.net', 
+ 'viewcastmedia.org', + 'vipmail.name', + 'vipmail.pw', + 'vpn.st', + 'vsimcard.com', + 'wasteland.rfc822.org', + 'watch-harry-potter.com', + 'watchfull.net', + 'webm4il.info', + 'weg-werf-email.de', + 'wegwerf-email-adressen.de', + 'wegwerf-email.de', + 'wegwerf-email.net', + 'wegwerf-emails.de', + 'wegwerfadresse.de', + 'wegwerfemail.com', + 'wegwerfemail.de', + 'wegwerfemail.net', + 'wegwerfemail.org', + 'wegwerfemailadresse.com', + 'wegwerfmail.de', + 'wegwerfmail.net', + 'wegwerfmail.org', + 'wetrainbayarea.com', + 'wetrainbayarea.org', + 'wh4f.org', + 'whatpaas.com', + 'whyspam.me', + 'willhackforfood.biz', + 'willselfdestruct.com', + 'winemaven.info', + 'wolfsmail.tk', + 'writeme.us', + 'wronghead.com', + 'wuzup.net', + 'wuzupmail.net', + 'www.e4ward.com', + 'www.gishpuppy.com', + 'www.mailinator.com', + 'wwwnew.eu', + 'x.ip6.li', + 'xagloo.com', + 'xemaps.com', + 'xents.com', + 'xmaily.com', + 'xoxy.net', + 'yanet.me', + 'yep.it', + 'yogamaven.com', + 'yopmail.com', + 'yopmail.fr', + 'yopmail.net', + 'youmailr.com', + 'ypmail.webarnak.fr.eu.org', + 'yuurok.com', + 'yxzx.net', + 'z1p.biz', + 'zehnminuten.de', + 'zehnminutenmail.de', + 'zippymail.info', + 'zoaxe.com', + 'zoemail.org', ]) \ No newline at end of file diff --git a/libraryauth/forms.py b/libraryauth/forms.py index 5bf768380..52b323875 100644 --- a/libraryauth/forms.py +++ b/libraryauth/forms.py @@ -1,22 +1,58 @@ import logging +from random import randint + from django import forms from django.contrib.auth import get_user_model -from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm +from django.contrib.auth.forms import PasswordResetForm from django.contrib.auth.models import User +from django.core.cache import cache from django.utils.translation import ugettext_lazy as _ -from registration.forms import RegistrationForm + + +# hack to fix bug in old version of django-registration +from registration.validators import CONFUSABLE_EMAIL +from confusable_homoglyphs import 
confusables +def validate_confusables_email(value): + if '@' not in value: + return + parts = value.split('@') + if len(parts) != 2: + raise forms.ValidationError(CONFUSABLE_EMAIL, code='invalid') + local_part, domain = value.split('@') + if confusables.is_dangerous(local_part) or \ + confusables.is_dangerous(domain): + raise forms.ValidationError(CONFUSABLE_EMAIL, code='invalid') + +import registration +registration.validators.validate_confusables_email = validate_confusables_email +# end hack + +from registration.forms import RegistrationFormUniqueEmail from .emailcheck import is_disposable from .models import Library logger = logging.getLogger(__name__) +rands = [randint(0,99) for i in range(0, 21)] +encoder = {k:v for (k,v) in zip(range(0, 21), rands)} +decoder = {v:k for (k,v) in zip(range(0, 21), rands)} + +encode_answers = cache.get('encode_answers') +decode_answers = cache.get('decode_answers') +if not encode_answers: + cache.set('encode_answers', encoder, None) +if not decode_answers: + cache.set('decode_answers', decoder, None) + decode_answers = decoder + + class UserData(forms.Form): username = forms.RegexField( - label=_("New Username"), - max_length=30, + label=_("New Username"), + max_length=30, regex=r'^[\w.@+-]+$', - help_text = _("30 characters or fewer."), - error_messages = { + help_text=_("30 characters or fewer."), + error_messages={ 'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.") } ) @@ -35,38 +71,51 @@ def clean_username(self): raise forms.ValidationError(_("Your username is already "+username)) class UserNamePass(UserData): - password1 = forms.CharField(label=_("Password"), - widget=forms.PasswordInput) - password2 = forms.CharField(label=_("Password confirmation"), + password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput) + password2 = forms.CharField( + label=_("Password confirmation"), widget=forms.PasswordInput, - help_text = _("Enter the same password as above, for 
verification.")) + help_text=_("Enter the same password as above, for verification.") + ) allow_same = True + notarobot = forms.IntegerField( + label="Please show you're not a robot.", + error_messages={ + 'required': "", + }, + widget=forms.TextInput(attrs={'style': 'width: 2em'}), + ) + encode_answers = cache.get('encode_answers') + def clean_password2(self): password1 = self.cleaned_data.get("password1", "") password2 = self.cleaned_data["password2"] if password1 != password2: raise forms.ValidationError(_("The two passwords don't match.")) + return password2 -class RegistrationFormNoDisposableEmail(RegistrationForm): + def clean_notarobot(self): + notarobot = int(self.data["notarobot"]) + encoded_answer = self.encode_answers.get(notarobot, 'miss') + tries = self.data.get("tries", -1) + if str(encoded_answer) != tries: + raise forms.ValidationError("(Hint: it's addition)") + + return notarobot + +class RegistrationFormNoDisposableEmail(RegistrationFormUniqueEmail, UserNamePass): def clean_email(self): """ Check the supplied email address against a list of known disposable webmail domains. 
""" + cleaned_email = super(RegistrationFormNoDisposableEmail, self).clean_email() logger.info('cleaning email') - if is_disposable(self.cleaned_data['email']): + if is_disposable(cleaned_email): raise forms.ValidationError(_("Please supply a permanent email address.")) - return self.cleaned_data['email'] - -class AuthForm(AuthenticationForm): - def __init__(self, request=None, *args, **kwargs): - if request and request.method == 'GET': - saved_un= request.COOKIES.get('un', None) - super(AuthForm, self).__init__(initial={"username":saved_un},*args, **kwargs) - else: - super(AuthForm, self).__init__(*args, **kwargs) - + return cleaned_email + class SocialAwarePasswordResetForm(PasswordResetForm): def get_users(self, email): """ @@ -81,40 +130,43 @@ def clean_email(self): if not get_user_model().objects.filter(email__iexact=email, is_active=True).exists(): raise forms.ValidationError("There aren't ungluers with that email address!") return email - + class NewLibraryForm(forms.ModelForm): username = forms.RegexField( - label=_("Library Username"), - max_length=30, + label=_("Library Username"), + max_length=30, regex=r'^[\w.@+-]+$', - help_text = _("30 characters or fewer."), - error_messages = { + help_text=_("30 characters or fewer."), + error_messages={ 'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.") }, - initial = '', + initial='', ) email = forms.EmailField( - label=_("notification email address for library"), + label=_("notification email address for library"), max_length=100, error_messages={'required': 'Please enter an email address for the library.'}, ) + def clean_username(self): - username= self.cleaned_data['username'] + username = self.cleaned_data['username'] try: user = User.objects.get(username=username) - raise forms.ValidationError(_("That username is already in use, please choose another.")) + raise forms.ValidationError(_( + "That username is already in use, please choose another." 
+ )) except User.DoesNotExist: self.instance.user = User(username=username) return username - - + + class Meta: model = Library fields = 'name', 'backend', 'email', 'username' widgets = {'name':forms.TextInput(attrs={'size':'40'})} -class LibraryForm(forms.ModelForm): +class LibraryForm(forms.ModelForm): class Meta: model = Library - fields = 'name', 'backend', + fields = 'name', 'backend', diff --git a/libraryauth/migrations/0001_initial.py b/libraryauth/migrations/0001_initial.py index 02d56a064..ebfa6cd18 100644 --- a/libraryauth/migrations/0001_initial.py +++ b/libraryauth/migrations/0001_initial.py @@ -47,9 +47,9 @@ class Migration(migrations.Migration): ('backend', models.CharField(default=b'ip', max_length=10, choices=[(b'ip', b'IP authentication'), (b'cardnum', b'Library Card Number check'), (b'email', b'e-mail pattern check')])), ('name', models.CharField(default=b'', max_length=80)), ('approved', models.BooleanField(default=False)), - ('group', models.OneToOneField(related_name='library', null=True, to='auth.Group')), - ('owner', models.ForeignKey(related_name='libraries', to=settings.AUTH_USER_MODEL)), - ('user', models.OneToOneField(related_name='library', to=settings.AUTH_USER_MODEL)), + ('group', models.OneToOneField(on_delete=models.CASCADE, related_name='library', null=True, to='auth.Group')), + ('owner', models.ForeignKey(on_delete=models.CASCADE, related_name='libraries', to=settings.AUTH_USER_MODEL)), + ('user', models.OneToOneField(on_delete=models.CASCADE, related_name='library', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -58,23 +58,23 @@ class Migration(migrations.Migration): ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('credential', models.CharField(max_length=30, null=True)), ('date_modified', models.DateTimeField(auto_now=True)), - ('library', models.ForeignKey(related_name='library_users', to='libraryauth.Library')), - ('user', 
models.ForeignKey(related_name='user_libraries', to=settings.AUTH_USER_MODEL)), + ('library', models.ForeignKey(on_delete=models.CASCADE, related_name='library_users', to='libraryauth.Library')), + ('user', models.ForeignKey(on_delete=models.CASCADE, related_name='user_libraries', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='emailpattern', name='library', - field=models.ForeignKey(related_name='email_auths', to='libraryauth.Library'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='email_auths', to='libraryauth.Library'), ), migrations.AddField( model_name='cardpattern', name='library', - field=models.ForeignKey(related_name='cardnum_auths', to='libraryauth.Library'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='cardnum_auths', to='libraryauth.Library'), ), migrations.AddField( model_name='block', name='library', - field=models.ForeignKey(related_name='ip_auths', to='libraryauth.Library'), + field=models.ForeignKey(on_delete=models.CASCADE, related_name='ip_auths', to='libraryauth.Library'), ), ] diff --git a/libraryauth/migrations/0003_badusernamepattern.py b/libraryauth/migrations/0003_badusernamepattern.py new file mode 100644 index 000000000..b9bfdaf26 --- /dev/null +++ b/libraryauth/migrations/0003_badusernamepattern.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.14 on 2018-12-06 17:32 +from __future__ import unicode_literals + +from django.db import migrations, models +import django.utils.timezone + + +class Migration(migrations.Migration): + + dependencies = [ + ('libraryauth', '0002_auto_20160727_2214'), + ] + + operations = [ + migrations.CreateModel( + name='BadUsernamePattern', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('pattern', models.CharField(max_length=100)), + ('last', models.DateTimeField(default=django.utils.timezone.now)), + ], + ), + ] diff --git 
a/libraryauth/migrations/0004_auto_20200214_1347.py b/libraryauth/migrations/0004_auto_20200214_1347.py new file mode 100644 index 000000000..6acf7a939 --- /dev/null +++ b/libraryauth/migrations/0004_auto_20200214_1347.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.28 on 2020-02-14 13:47 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('libraryauth', '0003_badusernamepattern'), + ] + + operations = [ + migrations.AlterField( + model_name='library', + name='backend', + field=models.CharField(choices=[('ip', 'IP authentication'), ('cardnum', 'Library Card Number check'), ('email', 'e-mail pattern check')], default='ip', max_length=10), + ), + migrations.AlterField( + model_name='library', + name='name', + field=models.CharField(default='', max_length=80), + ), + ] diff --git a/libraryauth/models.py b/libraryauth/models.py index 527c2a82d..574a767ff 100644 --- a/libraryauth/models.py +++ b/libraryauth/models.py @@ -1,63 +1,81 @@ -# IP address part of this of this copied from https://github.com/benliles/django-ipauth/blob/master/ipauth/models.py - +# IP address part of this of this copied from +# https://github.com/benliles/django-ipauth/blob/master/ipauth/models.py +import logging import re -from django.contrib.auth.models import User, Group +from functools import total_ordering + +from django.contrib.auth.models import Group from django.conf import settings from django.core.exceptions import ValidationError from django.core import validators from django.db import models -from django.db.models import Q from django.db.models.signals import post_save +from django.db.utils import OperationalError from django.forms import GenericIPAddressField as BaseIPAddressField -from django.utils.translation import ugettext_lazy as _ -from django.core.urlresolvers import reverse +from django.urls import reverse +from django.utils import timezone + +logger = 
logging.getLogger(__name__) class Library(models.Model): ''' name and other things derive from the User ''' - user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='library') - group = models.OneToOneField(Group, related_name='library', null = True) - backend = models.CharField(max_length=10, choices=( - ('ip','IP authentication'), - ('cardnum', 'Library Card Number check'), - ('email', 'e-mail pattern check'), - ),default='ip') - name = models.CharField(max_length=80, default='') + user = models.OneToOneField( + settings.AUTH_USER_MODEL, + on_delete=models.CASCADE, + related_name='library', + ) + group = models.OneToOneField( + Group, + on_delete=models.CASCADE, + related_name='library', + null=True + ) + backend = models.CharField(max_length=10, choices=( + ('ip', 'IP authentication'), + ('cardnum', 'Library Card Number check'), + ('email', 'e-mail pattern check'), + ), default='ip') + name = models.CharField(max_length=80, default='') approved = models.BooleanField(default=False) - owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="libraries") + owner = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.CASCADE, + related_name="libraries", + ) credential = None - - def __unicode__(self): - return unicode(self.name) - + + def __str__(self): + return str(self.name) + def add_user(self, user): user.groups.add(self.group) (library_user, created) = LibraryUser.objects.get_or_create(library=self, user=user) - library_user.credential=self.credential + library_user.credential = self.credential library_user.save() - + def has_user(self, user): return self.group in user.groups.all() or user == self.user - + @property def join_template(self): if self.approved: return 'libraryauth/' + self.backend + '_join.html' - else: - return 'libraryauth/unapproved.html' + return 'libraryauth/unapproved.html' + @property def help_template(self): return 'libraryauth/' + self.backend + '_help.html' - + def get_absolute_url(self): return 
reverse('library', args=[self.user.username]) def add_group(sender, created, instance, **kwargs): if created: - num='' + num = '' while created: - (group,created)=Group.objects.get_or_create(name=instance.user.username + num) + (group, created) = Group.objects.get_or_create(name=instance.user.username + num) # make sure not using a group twice! if created: created = False @@ -68,31 +86,31 @@ def add_group(sender, created, instance, **kwargs): created = True except Library.DoesNotExist: pass - instance.group=group + instance.group = group instance.save() post_save.connect(add_group, sender=Library) -def ip_to_long(value): +def ip_to_int(value): validators.validate_ipv4_address(value) lower_validator = validators.MinValueValidator(0) - upper_validator = validators.MinValueValidator(255) + upper_validator = validators.MaxValueValidator(255) - value = value.split('.') + value = [int(octet) for octet in value.split('.')] output = 0 for i in range(0, 4): validators.validate_integer(value[i]) lower_validator(value[i]) upper_validator(value[i]) - output += long(value[i]) * (256**(3-i)) + output += value[i] * (256**(3-i)) return output -def long_to_ip(value): +def int_to_ip(value): validators.validate_integer(value) - value = long(value) + value = int(value) validators.MinValueValidator(0)(value) validators.MaxValueValidator(4294967295)(value) @@ -100,6 +118,7 @@ def long_to_ip(value): return '%d.%d.%d.%d' % (value >> 24, value >> 16 & 255, value >> 8 & 255, value & 255) +@total_ordering class IP(object): def __init__(self, value): self.int = value @@ -109,9 +128,9 @@ def _set_int(self, value): self._int = IP.int try: - self._int = long(value) + self._int = int(value) except ValueError: - self._int = ip_to_long(value) + self._int = ip_to_int(value) except (TypeError, ValidationError): self._int = None @@ -121,8 +140,8 @@ def _get_int(self): int = property(_get_int, _set_int) def _get_str(self): - if self.int!=None: - return long_to_ip(self.int) + if self.int != None: + 
return int_to_ip(self.int) return '' string = property(_get_str, _set_int) @@ -136,18 +155,15 @@ def __eq__(self, other): return self.int == other.int - def __cmp__(self, other): + def __gt__(self, other): if not isinstance(other, IP): other = IP(other) if self.int is not None and other.int is not None: - return self.int.__cmp__(other.int) + return self.int.__gt__(other.int) raise ValueError('Invalid arguments') - def __unicode__(self): - return self.string - def __str__(self): return self.string @@ -166,7 +182,7 @@ def prepare_value(self, value): return value def to_python(self, value): - if value==0: + if value == 0: return IP(0) if value in validators.EMPTY_VALUES: return None @@ -178,7 +194,6 @@ def to_python(self, value): code='invalid') class IPAddressModelField(models.GenericIPAddressField): - __metaclass__ = models.SubfieldBase empty_strings_allowed = False def __init__(self, *args, **kwargs): @@ -206,31 +221,38 @@ def formfield(self, **kwargs): defaults = {'form_class': IPAddressFormField} defaults.update(kwargs) return super(models.GenericIPAddressField, self).formfield(**defaults) - + def deconstruct(self): name, path, args, kwargs = super(models.GenericIPAddressField, self).deconstruct() return name, path, args, kwargs + class Block(models.Model): - library = models.ForeignKey(Library, related_name='ip_auths') + library = models.ForeignKey(Library, on_delete=models.CASCADE, related_name='ip_auths') lower = IPAddressModelField(db_index=True, unique=True) upper = IPAddressModelField(db_index=True, blank=True, null=True) def clean(self): - if self.upper and self.upper.int: + if self.upper: try: if self.lower > self.upper: raise ValidationError('Lower end of the Block must be less ' 'than or equal to the upper end') - except ValueError, e: + except ValueError as e: pass - def __unicode__(self): - if self.upper and self.upper.int: - return u'%s %s-%s' % (self.library, self.lower, self.upper) + def __str__(self): + if self.upper: + return u'%s %s - %s' % 
(self.library, IP(self.lower), IP(self.upper)) return u'%s %s' % (self.library, self.lower) + def upper_IP(self): + return IP(self.upper) + + def lower_IP(self): + return IP(self.lower) + class Meta: ordering = ['lower',] @@ -247,36 +269,57 @@ def digits_of(n): for d in even_digits: checksum += sum(digits_of(d*2)) return checksum % 10 - + class CardPattern(models.Model): - library = models.ForeignKey(Library, related_name='cardnum_auths') + library = models.ForeignKey(Library, on_delete=models.CASCADE, related_name='cardnum_auths') # match pattern ^\d+#+$ pattern = models.CharField(max_length=20) checksum = models.BooleanField(default=True) def is_valid(self, card_number): - match_pattern='^' + self.pattern.replace('#','\d',20) + '$' - if re.match(match_pattern,card_number) is None: + match_pattern = '^' + self.pattern.replace('#', r'\d', 20) + '$' + if re.match(match_pattern, card_number) is None: return False if self.checksum: return luhn_checksum(card_number) == 0 - else: - return True + return True class LibraryUser(models.Model): - library = models.ForeignKey(Library, related_name='library_users') - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='user_libraries') + library = models.ForeignKey(Library, on_delete=models.CASCADE, related_name='library_users') + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.CASCADE, + related_name='user_libraries', + ) credential = models.CharField(max_length=30, null=True) date_modified = models.DateTimeField(auto_now=True) class EmailPattern(models.Model): - library = models.ForeignKey(Library, related_name='email_auths') + library = models.ForeignKey(Library, on_delete=models.CASCADE, related_name='email_auths') # email endswith string pattern = models.CharField(max_length=20) def is_valid(self, email): - if email.lower().endswith(self.pattern.lower()): + return email.lower().endswith(self.pattern.lower()) + +class BadUsernamePattern(models.Model): + pattern = 
models.CharField(max_length=100) + last = models.DateTimeField(default=timezone.now) + + def matches(self, username): + if re.match(self.pattern, username): + self.last = timezone.now() + self.save() return True - else: - return False + return False + +def get_special(): + try: + specials = Library.objects.filter(user__username='special') + for special in specials: + return special + return None + except OperationalError: + # database not loaded yet, for example during testing + return None diff --git a/libraryauth/signals.py b/libraryauth/signals.py index e0b43bbf9..9251df2b0 100644 --- a/libraryauth/signals.py +++ b/libraryauth/signals.py @@ -8,16 +8,17 @@ @receiver(registration.signals.user_activated) def handle_same_email_account(sender, user, **kwargs): logger.info('checking %s' % user.username) - old_users=User.objects.exclude(id=user.id).filter(email=user.email) + old_users = User.objects.exclude(id=user.id).filter(email=user.email) for old_user in old_users: # decide why there's a previous user with this email if not old_user.is_active: # never activated old_user.delete() elif old_user.date_joined < user.date_joined: - # relax + # relax pass - else: - # shouldn't happen; don't want to delete the user in case the user is being used for something - old_user.email= '%s.unglue.it'% old_user.email - + else: + # shouldn't happen; don't want to delete the user + # in case the user is being used for something + old_user.email = '%s.unglue.it'% old_user.email + diff --git a/libraryauth/templatetags/libraryauthtags.py b/libraryauth/templatetags/libraryauthtags.py index 9bdd57a97..d95ce4b8d 100644 --- a/libraryauth/templatetags/libraryauthtags.py +++ b/libraryauth/templatetags/libraryauthtags.py @@ -1,5 +1,3 @@ -import unicodedata - from django.template import Library from .. import models @@ -11,9 +9,9 @@ def libname(value): returns library name . 
""" try: - vl = long( value ) + vl = int(value) lib = models.Library.objects.get(pk=vl) - return lib.__unicode__() + return lib.__str__() except models.Library.DoesNotExist: return value diff --git a/libraryauth/templatetags/puzzle.py b/libraryauth/templatetags/puzzle.py new file mode 100644 index 000000000..a34df389c --- /dev/null +++ b/libraryauth/templatetags/puzzle.py @@ -0,0 +1,30 @@ +from random import randint + +from django.core.cache import cache +from django.template import Library + +register = Library() + +digits = { + 0: '⓪', + 1: '1', + 2: '⓶', + 3: '③', + 4: '⑷', + 5: '⒌', + 6: 'six', + 7: '⑦', + 8: '8️⃣', + 9: '𝟫', + 10: '10', +} +encode_answers = cache.get('encode_answers') + +@register.simple_tag(takes_context=True) +def puzz(context): + num1 = randint(0, 10) + num2 = randint(0, 10) + context['puzznum1'] = digits[num1] + context['puzznum2'] = digits[num2] + context['puzzans'] = encode_answers[num1 + num2] + return '' diff --git a/libraryauth/tests.py b/libraryauth/tests.py index 5ec6fa0e4..d18884903 100644 --- a/libraryauth/tests.py +++ b/libraryauth/tests.py @@ -1,19 +1,27 @@ -import unittest -from django.core.urlresolvers import reverse +from django.urls import reverse from django.test import TestCase from django.contrib.auth.models import User +from django.core.cache import cache + class TestLibraryAuth(TestCase): - fixtures=['initial_data.json'] + fixtures = ['initial_data.json'] def setUp(self): pass + def test_login(self): + resp = self.client.get(reverse('superlogin'), data={'next': '/'}) + self.assertEqual(200, resp.status_code) + self.client.cookies['un'] = 'bob' + resp = self.client.get(reverse('superlogin'), data={'next': '/'}) + self.assertEqual(200, resp.status_code) + resp = self.client.post(reverse('superlogin'), data={'username': 'bob'}) + self.assertEqual(200, resp.status_code) def test_pages(self): resp = self.client.get(reverse('registration_register')) self.assertEqual(200, resp.status_code) - def test_registration(self): """ 
LibraryAuth Registration creates a new inactive account and a new profile @@ -21,21 +29,25 @@ def test_registration(self): sends an activation email. """ + encode_answers = cache.get('encode_answers') resp = self.client.post(reverse('registration_register'), data={'username': 'bob', 'email': 'bob@example.com', 'password1': 'secret', - 'password2': 'secret'}) + 'password2': 'secret', + 'notarobot': '11', + 'tries': str(encode_answers.get(11)), + }) self.assertRedirects(resp, reverse('registration_complete')) new_user = User.objects.get(username='bob') - self.failUnless(new_user.check_password('secret')) + self.assertTrue(new_user.check_password('secret')) self.assertEqual(new_user.email, 'bob@example.com') # New user must not be active. - self.failIf(new_user.is_active) - + self.assertFalse(new_user.is_active) + def test_bad_registration(self): """ LibraryAuth Registration rejects. @@ -46,14 +58,12 @@ def test_bad_registration(self): 'email': 'bob@mailnesia.com', 'password1': 'secret', 'password2': 'secret'}) - self.assertTrue('Please supply a permanent email address' in resp.content) - + self.assertTrue('Please supply a permanent email address' in str(resp.content, 'utf-8')) + with self.assertRaises(User.DoesNotExist): User.objects.get(username='badbob') - + def test_is_disposable(self): from .emailcheck import is_disposable self.assertFalse(is_disposable('eric@hellman.net')) self.assertTrue(is_disposable('eric@mailnesia.com')) - - diff --git a/libraryauth/urls.py b/libraryauth/urls.py index a14b3cd35..bd336bce7 100644 --- a/libraryauth/urls.py +++ b/libraryauth/urls.py @@ -1,16 +1,16 @@ -from django.conf.urls import patterns, url, include -from django.core.urlresolvers import reverse_lazy +from django.conf.urls import url, include +from django.urls import reverse_lazy from django.views.generic.base import TemplateView from django.contrib.auth.decorators import login_required from django.contrib.auth.views import password_reset -from . 
import views, models, forms +from . import views, forms from .views import superlogin # class to reproduce django 1.4 funtionality class ExtraContextTemplateView(TemplateView): extra_context = None def get_context_data(self, **kwargs): - context = super(self.__class__, self).get_context_data(**kwargs) + context = super(ExtraContextTemplateView, self).get_context_data(**kwargs) if self.extra_context is not None: for key, value in self.extra_context.items(): if callable(value): @@ -21,51 +21,61 @@ def get_context_data(self, **kwargs): urlpatterns = [ url(r'^libraryauth/(?P\d+)/join/$', views.join_library, name='join_library'), - url(r'^libraryauth/(?P\d+)/deny/$', TemplateView.as_view(template_name='libraryauth/denied.html'), name='bad_library'), - url(r'^libraryauth/(?P\d+)/users/$', views.library, {'template':'libraryauth/users.html'}, name='library_users'), - url(r'^libraryauth/(?P\d+)/admin/$', login_required(views.UpdateLibraryView.as_view()), name='library_admin'), - url(r'^libraryauth/(?P\d+)/login/$', views.login_as_library, name='library_login'), - url(r'^libraryauth/create/$', login_required(views.CreateLibraryView.as_view()), name='library_create'), + url(r'^libraryauth/(?P\d+)/deny/$', + TemplateView.as_view(template_name='libraryauth/denied.html'), + name='bad_library'), + url(r'^libraryauth/(?P\d+)/users/$', + views.library, {'template':'libraryauth/users.html'}, + name='library_users'), + url(r'^libraryauth/(?P\d+)/admin/$', + login_required(views.UpdateLibraryView.as_view()), + name='library_admin'), + url(r'^libraryauth/(?P\d+)/login/$', views.login_as_library, name='library_login'), + url(r'^libraryauth/create/$', + login_required(views.CreateLibraryView.as_view()), + name='library_create'), url(r'^libraryauth/list/$', ExtraContextTemplateView.as_view( - template_name='libraryauth/list.html', - extra_context={'libraries_to_show':'approved'} - ), name='library_list'), + template_name='libraryauth/list.html', + 
extra_context={'libraries_to_show':'approved'} + ), name='library_list'), url(r'^libraryauth/unapproved/$', ExtraContextTemplateView.as_view( - template_name='libraryauth/list.html', - extra_context={'libraries_to_show':'new'} - ), name='new_libraries'), - url(r'^accounts/register/$', views.CustomRegistrationView.as_view(), name='registration_register'), + template_name='libraryauth/list.html', + extra_context={'libraries_to_show':'new'} + ), name='new_libraries'), + url(r'^accounts/register/$', + views.CustomRegistrationView.as_view(), + name='registration_register'), url(r'^accounts/superlogin/$', views.superlogin, name='superlogin'), url(r'^accounts/superlogin/welcome/$', ExtraContextTemplateView.as_view( - template_name='registration/welcome.html', - extra_context={'suppress_search_box': True,} - ) ), + template_name='registration/welcome.html', + extra_context={'suppress_search_box': True,} + )), url(r'^accounts/login/pledge/$', superlogin, - {'template_name': 'registration/from_pledge.html'}), + {'template_name': 'registration/from_pledge.html'}), url(r'^accounts/login/purchase/$', superlogin, - {'template_name': 'registration/from_purchase.html'}), + {'template_name': 'registration/from_purchase.html'}), url(r'^accounts/login/add/$', superlogin, - {'template_name': 'registration/from_add.html'}), + {'template_name': 'registration/from_add.html'}), url(r'^accounts/activate/complete/$', superlogin, - {'template_name': 'registration/activation_complete.html'}), + {'template_name': 'registration/activation_complete.html'}), url(r'^accounts/login-error/$', superlogin, - {'template_name': 'registration/from_error.html'}), + {'template_name': 'registration/from_error.html'}), url(r'^accounts/edit/$', views.edit_user, name='edit_user'), url(r'^accounts/login/welcome/$', ExtraContextTemplateView.as_view( - template_name='registration/welcome.html', - extra_context={'suppress_search_box': True,} - ) ), + template_name='registration/welcome.html', + 
extra_context={'suppress_search_box': True,} + )), url(r'^accounts/password/change/$', - views.social_aware_password_change, - {'post_change_redirect': reverse_lazy('auth_password_change_done')}, - name='libraryauth_password_change'), + login_required(views.social_aware_password_change), + {'post_change_redirect': reverse_lazy('password_change_done')}, + name='libraryauth_password_change'), url(r'^password/reset/$', - password_reset, - {'post_reset_redirect': reverse_lazy('auth_password_reset_done'), - 'password_reset_form': forms.SocialAwarePasswordResetForm}, - name='libraryauth_password_reset'), + password_reset, + {'post_reset_redirect': reverse_lazy('password_reset_done'), + 'password_reset_form': forms.SocialAwarePasswordResetForm}, + name='libraryauth_password_reset'), - url(r'^socialauth/', include('social.apps.django_app.urls', namespace='social')), + url(r'^socialauth/', include('social_django.urls', namespace='social')), url('accounts/', include('email_change.urls')), url(r'^accounts/', include('registration.backends.model_activation.urls')), url(r'^accounts/', include('django.contrib.auth.urls')), diff --git a/libraryauth/views.py b/libraryauth/views.py index b17f00c1a..13c6f2bff 100644 --- a/libraryauth/views.py +++ b/libraryauth/views.py @@ -1,25 +1,28 @@ import logging -import random +import re from django.conf import settings -from django.core.urlresolvers import reverse -from django.shortcuts import get_object_or_404, render -from django.contrib.auth.forms import SetPasswordForm -from django.contrib.auth.views import login, password_reset, password_change from django.contrib.auth import login as login_to_user from django.contrib.auth import load_backend from django.contrib.auth.decorators import login_required -from django.http import HttpResponseRedirect -from django.views.generic.edit import FormView, CreateView, UpdateView, SingleObjectMixin +from django.contrib.auth.forms import SetPasswordForm +from django.contrib.auth.models import User +from 
django.contrib.auth.views import password_change, LoginView +from django.db import IntegrityError +from django.http import HttpResponseRedirect, Http404 +from django.shortcuts import get_object_or_404, redirect, render +from django.urls import reverse + +from django.views.generic.edit import CreateView, UpdateView from registration.backends.model_activation.views import RegistrationView from . import backends -from .models import Library -from .forms import AuthForm, LibraryForm, NewLibraryForm, RegistrationFormNoDisposableEmail, UserData +from .models import Library, BadUsernamePattern, get_special +from .forms import LibraryForm, NewLibraryForm, RegistrationFormNoDisposableEmail, UserData logger = logging.getLogger(__name__) -def get_library_or_404(library=None, library_id=None): +def get_library_or_404(library=None, library_id=None): if library_id: try: return get_object_or_404(Library, id=library_id) @@ -28,36 +31,53 @@ def get_library_or_404(library=None, library_id=None): else: return get_object_or_404(Library, user__username=library) -def library(request, library=None, library_id=None, - extra_context={}, - template='libraryauth/library.html', +def library( + request, library=None, library_id=None, + extra_context={}, template='libraryauth/library.html', **kwargs): - library=get_library_or_404(library=library, library_id=library_id) - context={ 'library':library, - 'is_admin': request.user.is_staff or request.user==library.user, - 'is_member': request.user.is_staff or library.has_user(request.user), - } + library = get_library_or_404(library=library, library_id=library_id) + context = { + 'library':library, + 'is_admin': request.user.is_staff or request.user == library.user, + 'is_member': request.user.is_staff or library.has_user(request.user), + } context.update(extra_context) return render(request, template, context) def join_library(request, library_id): - library=get_library_or_404(library_id=library_id) - return Authenticator(request,library).process( 
- reverse('library',args=[library.user]), - reverse('bad_library',args=[library.id]), - ) + library = get_library_or_404(library_id=library_id) + return Authenticator(request, library).process( + reverse('library', args=[library.user]), + reverse('bad_library', args=[library.id]), + ) + +class SuperLoginView(LoginView): + def get_initial(self): + initial = super(SuperLoginView, self).get_initial() + if self.request.method == 'GET': + saved_un = self.request.COOKIES.get('un', None) + initial["username"] = saved_un + return initial + + def get(self, request, *args, **kwargs): + if 'add' in self.request.GET: + self.request.session["add_wishlist"] = self.request.GET["add"] + return super(SuperLoginView, self).get(request, *args, **kwargs) + + def get_context_data(self, **kwargs): + context = super(SuperLoginView, self).get_context_data(**kwargs) + if self.request.method == 'POST' and self.request.user.is_anonymous: + username = self.request.POST.get("username", "") + try: + user = User.objects.get(username=username) + context['socials'] = user.profile.social_auths + except: + pass + return context def superlogin(request, extra_context={}, **kwargs): - if request.method == 'POST' and request.user.is_anonymous(): - username=request.POST.get("username", "") - try: - user=models.User.objects.get(username=username) - extra_context={"socials":user.profile.social_auths} - except: - pass - if request.GET.has_key("add"): - request.session["add_wishlist"]=request.GET["add"] - return login(request, extra_context=extra_context, authentication_form=AuthForm, **kwargs) + return SuperLoginView.as_view(extra_context=extra_context, **kwargs)(request) + def social_aware_password_change(request, **kwargs): if request.user.has_usable_password(): @@ -65,55 +85,62 @@ def social_aware_password_change(request, **kwargs): return password_change(request, password_change_form=SetPasswordForm, **kwargs) class Authenticator: - request=None - library=None + request = None + library = None def 
__init__(self, request, library, *args, **kwargs): self.request = request - if isinstance(library , basestring): + if isinstance(library, str): self.library = Library.objects.get(user__username=library) - elif isinstance(library , Library): - self.library=library + elif isinstance(library, Library): + self.library = library + elif library == special: + return else: raise Exception - self.backend_class=getattr(backends,self.library.backend) + self.backend_class = getattr(backends, self.library.backend) form_class = self.backend_class.form if form_class: self.form = form_class(request, self.library, *args, **kwargs) else: self.form = None - + def process(self, success_url, deny_url): logger.info('authenticator for %s at %s.'%(self.request.user, self.library)) if self.library.has_user(self.request.user): return HttpResponseRedirect(success_url) - + if self.backend_class().authenticate(self.request, self.library): - if self.request.user.is_authenticated(): + if self.request.user.is_authenticated: self.library.add_user(self.request.user) return HttpResponseRedirect(success_url) - else: - return superlogin(self.request, extra_context={'library':self.library}, template_name='libraryauth/library_login.html') - - else: - return self.backend_class.authenticator().process(self, success_url, deny_url) - + return superlogin( + self.request, + extra_context={'library':self.library}, + template_name='libraryauth/library_login.html' + ) + return self.backend_class.authenticator().process(self, success_url, deny_url) + def allowed(self): - return self.backend_class().authenticate(self.request, self.library) - + if self.library: + return self.backend_class().authenticate(self.request, self.library) + +special = get_special() + + class BaseLibraryView: - model = Library - template_name="libraryauth/edit.html" - + model = Library + template_name = "libraryauth/edit.html" + class CreateLibraryView(BaseLibraryView, CreateView): form_class = NewLibraryForm - + def get_initial(self): 
return {'email': self.request.user.email} def form_valid(self, form): - form.instance.owner = self.request.user + form.instance.owner = self.request.user user = form.instance.user user.email = form.cleaned_data['email'] user.save() @@ -123,12 +150,12 @@ def form_valid(self, form): form.instance.add_user(self.request.user) # library.owner is a member of library context_data = self.get_context_data(form=form) context_data['status'] = 'Library Updated' - return HttpResponseRedirect(reverse('library_admin',args=[form.instance.id])) + return HttpResponseRedirect(reverse('library_admin', args=[form.instance.id])) class UpdateLibraryView(BaseLibraryView, UpdateView): - pk_url_kwarg = 'library_id' + pk_url_kwarg = 'library_id' form_class = LibraryForm - + def form_valid(self, form): context_data = self.get_context_data(form=form) form.instance.save() @@ -137,26 +164,33 @@ def form_valid(self, form): def get_backend_form_class(self): if self.object and self.object.backend: - backend_class=getattr(backends,self.object.backend) + backend_class = getattr(backends, self.object.backend) return backend_class.admin_form - else: - return None - + return None + def get_backend_admin_forms(self): if self.object and self.object.backend: backend_models_name = '%s_auths' % self.object.backend - backend_models = getattr(self.object,backend_models_name) - backend_new_form = self.get_backend_form_class()(initial = {'library':self.object}, prefix="new") - backend_old_forms = [self.get_backend_form_class()(instance = backend_model, prefix="backend_%s"%backend_model.id) for backend_model in backend_models.all()] + backend_models = getattr(self.object, backend_models_name) + backend_new_form = self.get_backend_form_class()( + initial={'library': self.object}, + prefix="new", + ) + backend_old_forms = [self.get_backend_form_class()( + instance=backend_model, + prefix="backend_%s"%backend_model.id, + ) for backend_model in backend_models.all()] return backend_old_forms + [backend_new_form] - 
else: - return [] - + return [] + def get_context_data(self, backend_form=None, form=None, **kwargs): - context = super(UpdateLibraryView,self).get_context_data(**kwargs) + context = super(UpdateLibraryView, self).get_context_data(**kwargs) backend_admin_forms = self.get_backend_admin_forms() if backend_form: - backend_admin_forms = [ backend_form if backend_form.prefix== backend_admin_form.prefix else backend_admin_form for backend_admin_form in backend_admin_forms] + backend_admin_forms = [ + backend_form if backend_form.prefix == backend_admin_form.prefix \ + else backend_admin_form for backend_admin_form in backend_admin_forms + ] context['backend_admin_forms'] = backend_admin_forms if form: context['form'] = form @@ -166,17 +200,17 @@ def get(self, request, *args, **kwargs): self.object = self.get_object() # check permissions if request.user not in [self.object.owner, self.object.user]: - context_data={'status': 'You\'re not permitted to edit this library.'} + context_data = {'status': 'You\'re not permitted to edit this library.'} return self.render_to_response(context_data) form = self.get_form(self.form_class) return self.render_to_response(self.get_context_data(form=form)) - + def post(self, request, *args, **kwargs): # get the user instance (the library) self.object = self.get_object() # check permissions if request.user not in [self.object.owner, self.object.user]: - context_data={'status': 'You\'re not permitted to edit this library.'} + context_data = {'status': 'You\'re not permitted to edit this library.'} return self.render_to_response(context_data) # determine if backend form is being submitted # uses the name of the form's submit button @@ -187,46 +221,53 @@ def post(self, request, *args, **kwargs): backend_id = request.POST['id'] if 'backend_submit' in request.POST: # we're editing the backend - if backend_id is None or backend_id=="None": + if backend_id is None or backend_id == "None": backend_model_instance = form_model(library=self.object) - 
form = form_class(data=request.POST, instance=backend_model_instance, prefix="new") + form = form_class( + data=request.POST, + instance=backend_model_instance, + prefix="new" + ) else: backend_model_instance = form_model.objects.get(id=backend_id) - form = form_class(data=request.POST, instance=backend_model_instance, prefix="backend_%s"%request.POST['id']) + form = form_class( + data=request.POST, + instance=backend_model_instance, + prefix="backend_%s"%request.POST['id'], + ) if form.is_valid(): form.save() status = 'User Validation Updated.' - context_data = self.get_context_data( form=self.form_class(instance=self.object)) + context_data = self.get_context_data( + form=self.form_class(instance=self.object)) else: status = 'Problem with User Validation.' - context_data = self.get_context_data(backend_form=form, form=self.form_class(instance=self.object)) + context_data = self.get_context_data( + backend_form=form, form=self.form_class(instance=self.object)) else: #deleting a backend - if backend_id is not None and backend_id!="None": + if backend_id is not None and backend_id != "None": backend_model_instance = form_model.objects.get(id=backend_id) backend_model_instance.delete() status = 'Deleted.' else: status = 'Nothing to delete.' 
- context_data = self.get_context_data( form=self.form_class(instance=self.object)) + context_data = self.get_context_data(form=self.form_class(instance=self.object)) context_data['status'] = status return self.render_to_response(context_data) else: - # just use regular post handler + # just use regular post handler form = self.get_form(self.form_class) if form.is_valid(): return self.form_valid(form) - else: - return self.form_invalid(form) - + return self.form_invalid(form) + @login_required -def login_as_library(request, library_id): - library=get_library_or_404(library_id=library_id) +def login_as_library(request, library_id): + library = get_library_or_404(library_id=library_id) if request.user == library.owner: login_user(request, library.user) - - return HttpResponseRedirect(reverse('library_admin',args=[library.id])) - + return HttpResponseRedirect(reverse('library_admin', args=[library.id])) def login_user(request, user): """ @@ -244,36 +285,88 @@ def login_user(request, user): return login_to_user(request, user) robot_qs = { - 'user', - 'user/register', - 'node/add', - } - + 'user', + 'user/register', + 'node/add', +} + class CustomRegistrationView(RegistrationView): form_class = RegistrationFormNoDisposableEmail + def pretend_success(self): + # pretend success + success_url = self.get_success_url(None) + try: + to, args, kwargs = success_url + return redirect(to, *args, **kwargs) + except ValueError: + return redirect(success_url) + def form_valid(self, form): - q = self.request.session.get('q', False) + q = self.request.session.get('q', False) if q and q in robot_qs: return self.render_to_response({'form':form}) - return super(CustomRegistrationView,self).form_valid(form) - + username = form.cleaned_data['username'] + email = form.cleaned_data['email'] + for bad_pattern in BadUsernamePattern.objects.all(): + if bad_pattern.matches(username): + return self.pretend_success() + if suspicious(username, email): + return self.pretend_success() + special_auth 
= Authenticator(self.request, special) + if special_auth.allowed(): + logger.info('special login from %s' % self.request.META['REMOTE_ADDR']) + return self.pretend_success() + try: + if form.cleaned_data['password1']: + return super(CustomRegistrationView, self).form_valid(form) + else: + # it's just the user and password + return self.render_to_response({'form':form}) + except IntegrityError: + # probably rapid double click + return self.pretend_success() + +SUSPICIOUSUN = re.compile(r'^[A-Z][a-z]{7}[a-z]*$', ) +MANYDOTS = re.compile(r'(\.[^\.]+){4}') +def similar(s1, s2): + #trigrams in common + (short, longer) = (s1, s2) if len(s2) > len(s1) else (s2, s1) + if len(short) < 3: + return short in longer + for trigram in [short[i:i + 3] for i in range(0, len(short) - 2)]: + if trigram in longer: + return True + return False + +def suspicious(username, email): + if '@' not in email: + return False + [em_username, host] = email.split('@')[0:2] + if MANYDOTS.search(em_username): + return True + test = SUSPICIOUSUN.search(username) + if not test: + return False + if similar(em_username.lower(), test.group(0).lower()): + return False + return not similar(host.lower(), test.group(0).lower()) + def edit_user(request, redirect_to=None): - if not request.user.is_authenticated(): - return HttpResponseRedirect(reverse('superlogin')) - form=UserData() - if request.method == 'POST': + if not request.user.is_authenticated: + return HttpResponseRedirect(reverse('superlogin')) + form = UserData() + if request.method == 'POST': if 'change_username' in request.POST.keys(): form = UserData(request.POST) form.oldusername = request.user.username if form.is_valid(): # All validation rules pass, go and change the username - request.user.username=form.cleaned_data['username'] + request.user.username = form.cleaned_data['username'] request.user.save() - if 'set_password' in request.POST.keys() and form.cleaned_data.has_key('set_password'): + if 'set_password' in request.POST.keys() and \ + 
'set_password' in form.cleaned_data: if not request.user.has_usable_password(): request.user.set_password(form.cleaned_data['set_password']) request.user.save() - return HttpResponseRedirect(redirect_to if redirect_to else reverse('home')) # Redirect after POST - return render(request,'registration/user_change_form.html', {'form': form}) - - - + # Redirect after POST + return HttpResponseRedirect(redirect_to if redirect_to else reverse('home')) + return render(request, 'registration/user_change_form.html', {'form': form}) diff --git a/marc/load.py b/marc/load.py index 7482d3642..715c13a8f 100644 --- a/marc/load.py +++ b/marc/load.py @@ -8,10 +8,9 @@ import pymarc import logging from datetime import datetime -from StringIO import StringIO from django.conf import settings -from django.core.urlresolvers import reverse +from django.urls import reverse import regluit.core.cc as cc @@ -95,7 +94,7 @@ def stub(edition): tag='250', indicators = [' ', ' '], subfields = [ - 'a', unicode(edition.note), + 'a', str(edition.note), ] ) record.add_ordered_field(field250) @@ -110,7 +109,7 @@ def stub(edition): ] ) if edition.publication_date: - field260.add_subfield('c', unicode(edition.publication_date)) + field260.add_subfield('c', str(edition.publication_date)) record.add_ordered_field(field260) if edition.description: diff --git a/marc/management/commands/load_marc_file.py b/marc/management/commands/load_marc_file.py index 859926cfc..3beaac513 100644 --- a/marc/management/commands/load_marc_file.py +++ b/marc/management/commands/load_marc_file.py @@ -8,4 +8,4 @@ class Command(BaseCommand): def handle(self, file, **options): xml_file = open(file,'r') num_loaded = import_records(xml_file) - print '%s records created' % num_loaded + print('%s records created' % num_loaded) diff --git a/marc/management/commands/migrate_records.py b/marc/management/commands/migrate_records.py index 79d94e518..5808a5c72 100644 --- a/marc/management/commands/migrate_records.py +++ 
b/marc/management/commands/migrate_records.py @@ -24,8 +24,8 @@ def handle(self, stoprecord=None, **options): editions.append(old_record.edition.pk) xml_file.close() new_record.save() - print 'record %s updated' % new_record.id + print('record %s updated' % new_record.id) except IOError: if created: new_record.delete() - print 'failed opening %s' % old_record.xml_record + print('failed opening %s' % old_record.xml_record) diff --git a/marc/migrations/0001_initial.py b/marc/migrations/0001_initial.py index 740d53010..644931045 100644 --- a/marc/migrations/0001_initial.py +++ b/marc/migrations/0001_initial.py @@ -19,8 +19,8 @@ class Migration(migrations.Migration): ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('guts', models.TextField()), ('created', models.DateTimeField(auto_now_add=True)), - ('edition', models.ForeignKey(related_name='MARCRecords', to='core.Edition', null=True)), - ('user', models.ForeignKey(related_name='MARCRecords', to=settings.AUTH_USER_MODEL, null=True)), + ('edition', models.ForeignKey(on_delete=models.CASCADE, related_name='MARCRecords', to='core.Edition', null=True)), + ('user', models.ForeignKey(on_delete=models.CASCADE, related_name='MARCRecords', to=settings.AUTH_USER_MODEL, null=True)), ], ), ] diff --git a/marc/models.py b/marc/models.py index 65bf14a6e..b9c05d591 100644 --- a/marc/models.py +++ b/marc/models.py @@ -1,7 +1,7 @@ import pymarc import logging from datetime import datetime -from StringIO import StringIO +from io import BytesIO from django.apps import apps from django.conf import settings @@ -79,7 +79,7 @@ def _xml(record): return pymarc.record_to_xml(record) def _mrc(record): - mrc_file = StringIO() + mrc_file = BytesIO() writer = pymarc.MARCWriter(mrc_file) writer.write(record) mrc_file.seek(0) @@ -93,9 +93,9 @@ class MARCRecord(models.Model): _the_record = None # note capitalization of related_name - edition = models.ForeignKey(EDITION_MODEL, 
related_name="MARCRecords", null=True) + edition = models.ForeignKey(EDITION_MODEL, on_delete=models.CASCADE, related_name="MARCRecords", null=True) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="MARCRecords", null=True ) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="MARCRecords", null=True ) created = models.DateTimeField(auto_now_add=True) @@ -131,8 +131,8 @@ def save(self, *args, **kwargs): def load_from_file(self, source='raw'): #parse guts - if isinstance(self.guts, str) or isinstance(self.guts, unicode): - marcfile = StringIO(self.guts) + if isinstance(self.guts, str) or isinstance(self.guts, str): + marcfile = BytesIO(bytes(self.guts, 'utf-8')) else: marcfile = self.guts if source == 'loc': @@ -147,7 +147,7 @@ def _record(self): if self._the_record: the_record = self._the_record else: - the_record = pymarc.parse_xml_to_array(StringIO(self.guts))[0] + the_record = pymarc.parse_xml_to_array(BytesIO(bytes(self.guts, 'utf-8')))[0] for field in the_record.get_fields('856'): the_record.remove_field(field) self._the_record = the_record diff --git a/marc/urls.py b/marc/urls.py index 5b8db2303..8ef66c01c 100644 --- a/marc/urls.py +++ b/marc/urls.py @@ -1,4 +1,4 @@ -from django.conf.urls import patterns, url, include +from django.conf.urls import url, include from django.contrib.auth.decorators import login_required from . 
import views diff --git a/marc/views.py b/marc/views.py index 010feaac4..6197582da 100644 --- a/marc/views.py +++ b/marc/views.py @@ -3,7 +3,7 @@ from django.apps import apps from django.contrib import messages -from django.core.urlresolvers import reverse, reverse_lazy +from django.urls import reverse, reverse_lazy from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound from django.views.generic.edit import FormView @@ -58,14 +58,14 @@ def marc_records(request, selected_records=None): elif hasattr(record_name, 'edition'): record = models.MARCRecord(edition=record_name.edition) elif record_name.startswith('edition_'): - record_id = long(record_name[8:]) + record_id = int(record_name[8:]) try: edition = Edition.objects.get(pk=record_id) except Edition.DoesNotExist: continue record = models.MARCRecord(edition=edition) elif record_name.startswith('record_'): - record_id = long(record_name[7:]) + record_id = int(record_name[7:]) try: record = models.MARCRecord.objects.get(id=record_id ) except models.MARCRecord.DoesNotExist: @@ -74,7 +74,8 @@ def marc_records(request, selected_records=None): outfile.write(record.record(format=format, link_target=link_target)) if format == 'xml': - outfile.write('') + outfile.write(''' +''') return outfile @@ -102,9 +103,9 @@ class MARCUpload(FormView): def get_initial(self): if self.request.method == 'GET': edition = self.request.GET.get('edition',None) - if Edition.objects.filter(id=edition).count(): + if Edition.objects.filter(id=edition).exists(): edition = Edition.objects.filter(id=edition)[0] - if edition.ebooks.count() or edition.ebook_files.count(): + if edition.ebooks.exists() or edition.ebook_files.exists(): return {'edition':edition.id} return {} diff --git a/mobi/__init__.py b/mobi/__init__.py deleted file mode 100644 index 287ed87fd..000000000 --- a/mobi/__init__.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -Mobi.py - -Created by Elliot Kroo on 2009-12-25. 
-Copyright (c) 2009 Elliot Kroo. All rights reserved. -""" - -import sys -import os -import unittest -from struct import * -from pprint import pprint -import utils -from lz77 import uncompress_lz77 - -class Mobi: - def parse(self): - """ reads in the file, then parses record tables""" - self.contents = self.f.read(); - self.header = self.parseHeader(); - self.records = self.parseRecordInfoList(); - self.readRecord0() - - def readRecord(self, recordnum, disable_compression=False): - if self.config: - if self.config['palmdoc']['Compression'] == 1 or disable_compression: - return self.contents[self.records[recordnum]['record Data Offset']:self.records[recordnum+1]['record Data Offset']]; - elif self.config['palmdoc']['Compression'] == 2: - result = uncompress_lz77(self.contents[self.records[recordnum]['record Data Offset']:self.records[recordnum+1]['record Data Offset']-self.config['mobi']['extra bytes']]) - return result - - def readImageRecord(self, imgnum): - if self.config: - recordnum = self.config['mobi']['First Image index'] + imgnum; - return self.readRecord(recordnum, disable_compression=True); - - def author(self): - "Returns the author of the book" - return self.config['exth']['records'][100] - - def title(self): - "Returns the title of the book" - return self.config['mobi']['Full Name'] - -########### Private API ########################### - - def __init__(self, filename): - try: - if isinstance(filename, str): - self.f = open(filename, "rb"); - else: - self.f = filename; - except IOError,e: - sys.stderr.write("Could not open %s! 
" % filename); - raise e; - self.offset = 0; - - def __iter__(self): - if not self.config: return; - for record in range(1, self.config['mobi']['First Non-book index'] - 1): - yield self.readRecord(record); - - def parseRecordInfoList(self): - records = {}; - # read in all records in info list - for recordID in range(self.header['number of records']): - headerfmt = '>II' - headerlen = calcsize(headerfmt) - fields = [ - "record Data Offset", - "UniqueID", - ] - # create tuple with info - results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) - - # increment offset into file - self.offset += headerlen - - # convert tuple to dictionary - resultsDict = utils.toDict(results); - - # futz around with the unique ID record, as the uniqueID's top 8 bytes are - # really the "record attributes": - resultsDict['record Attributes'] = (resultsDict['UniqueID'] & 0xFF000000) >> 24; - resultsDict['UniqueID'] = resultsDict['UniqueID'] & 0x00FFFFFF; - - # store into the records dict - records[resultsDict['UniqueID']] = resultsDict; - - return records; - - def parseHeader(self): - headerfmt = '>32shhIIIIII4s4sIIH' - headerlen = calcsize(headerfmt) - fields = [ - "name", - "attributes", - "version", - "created", - "modified", - "backup", - "modnum", - "appInfoId", - "sortInfoID", - "type", - "creator", - "uniqueIDseed", - "nextRecordListID", - "number of records" - ] - - # unpack header, zip up into list of tuples - results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) - - # increment offset into file - self.offset += headerlen - - # convert tuple array to dictionary - resultsDict = utils.toDict(results); - - return resultsDict - - def readRecord0(self): - palmdocHeader = self.parsePalmDOCHeader(); - MobiHeader = self.parseMobiHeader(); - exthHeader = None - if MobiHeader['Has EXTH Header']: - exthHeader = self.parseEXTHHeader(); - - self.config = { - 'palmdoc': palmdocHeader, - 'mobi' : MobiHeader, - 'exth' : 
exthHeader - } - - def parseEXTHHeader(self): - headerfmt = '>III' - headerlen = calcsize(headerfmt) - - fields = [ - 'identifier', - 'header length', - 'record Count' - ] - - # unpack header, zip up into list of tuples - results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) - - # convert tuple array to dictionary - resultsDict = utils.toDict(results); - - self.offset += headerlen; - resultsDict['records'] = {}; - for record in range(resultsDict['record Count']): - recordType, recordLen = unpack(">II", self.contents[self.offset:self.offset+8]); - recordData = self.contents[self.offset+8:self.offset+recordLen]; - resultsDict['records'][recordType] = recordData; - self.offset += recordLen; - - return resultsDict; - - def parseMobiHeader(self): - headerfmt = '> IIII II 40s III IIIII IIII I 36s IIII 8s HHIIIII' - headerlen = calcsize(headerfmt) - - fields = [ - "identifier", - "header length", - "Mobi type", - "text Encoding", - - "Unique-ID", - "Generator version", - - "-Reserved", - - "First Non-book index", - "Full Name Offset", - "Full Name Length", - - "Language", - "Input Language", - "Output Language", - "Format version", - "First Image index", - - "First Huff Record", - "Huff Record Count", - "First DATP Record", - "DATP Record Count", - - "EXTH flags", - - "-36 unknown bytes, if Mobi is long enough", - - "DRM Offset", - "DRM Count", - "DRM Size", - "DRM Flags", - - "-Usually Zeros, unknown 8 bytes", - - "-Unknown", - "Last Image Record", - "-Unknown", - "FCIS record", - "-Unknown", - "FLIS record", - "Unknown" - ] - - # unpack header, zip up into list of tuples - results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) - - # convert tuple array to dictionary - resultsDict = utils.toDict(results); - - resultsDict['Start Offset'] = self.offset; - - resultsDict['Full Name'] = (self.contents[ - self.records[0]['record Data Offset'] + resultsDict['Full Name Offset'] : - self.records[0]['record 
Data Offset'] + resultsDict['Full Name Offset'] + resultsDict['Full Name Length']]) - - resultsDict['Has DRM'] = resultsDict['DRM Offset'] != 0xFFFFFFFF; - - resultsDict['Has EXTH Header'] = (resultsDict['EXTH flags'] & 0x40) != 0; - - self.offset += resultsDict['header length']; - - def onebits(x, width=16): - return len(filter(lambda x: x == "1", (str((x>>i)&1) for i in xrange(width-1,-1,-1)))); - - resultsDict['extra bytes'] = 2*onebits(unpack(">H", self.contents[self.offset-2:self.offset])[0] & 0xFFFE) - - return resultsDict; - - def parsePalmDOCHeader(self): - headerfmt = '>HHIHHHH' - headerlen = calcsize(headerfmt) - fields = [ - "Compression", - "Unused", - "text length", - "record count", - "record size", - "Encryption Type", - "Unknown" - ] - offset = self.records[0]['record Data Offset']; - # create tuple with info - results = zip(fields, unpack(headerfmt, self.contents[offset:offset+headerlen])) - - # convert tuple array to dictionary - resultsDict = utils.toDict(results); - - self.offset = offset+headerlen; - return resultsDict - -class MobiTests(unittest.TestCase): - def setUp(self): - self.mobitest = Mobi("../test/CharlesDarwin.mobi"); - def testParse(self): - self.mobitest.parse(); - pprint (self.mobitest.config) - def testRead(self): - self.mobitest.parse(); - content = "" - for i in range(1,5): - content += self.mobitest.readRecord(i); - def testImage(self): - self.mobitest.parse(); - pprint (self.mobitest.records); - for record in range(4): - f = open("imagerecord%d.jpg" % record, 'w') - f.write(self.mobitest.readImageRecord(record)); - f.close(); - def testAuthorTitle(self): - self.mobitest.parse() - self.assertEqual(self.mobitest.author(), 'Charles Darwin') - self.assertEqual(self.mobitest.title(), 'The Origin of Species by means '+ - 'of Natural Selection, 6th Edition') - -if __name__ == '__main__': - unittest.main() diff --git a/mobi/lz77.py b/mobi/lz77.py deleted file mode 100644 index c61ddadeb..000000000 --- a/mobi/lz77.py +++ /dev/null @@ 
-1,86 +0,0 @@ -import struct -# ported directly from the PalmDoc Perl library -# http://kobesearch.cpan.org/htdocs/EBook-Tools/EBook/Tools/PalmDoc.pm.html - -def uncompress_lz77(data): - length = len(data); - offset = 0; # Current offset into data - # char; # Character being examined - # ord; # Ordinal of $char - # lz77; # 16-bit Lempel-Ziv 77 length-offset pair - # lz77offset; # LZ77 offset - # lz77length; # LZ77 length - # lz77pos; # Position inside $lz77length - text = ''; # Output (uncompressed) text - # textlength; # Length of uncompressed text during LZ77 pass - # textpos; # Position inside $text during LZ77 pass - - while offset < length: - # char = substr($data,$offset++,1); - char = data[offset]; - offset += 1; - ord_ = ord(char); - - # print " ".join([repr(char), hex(ord_)]) - - # The long if-elsif chain is the best logic for $ord handling - ## no critic (Cascading if-elsif chain) - if (ord_ == 0): - # Nulls are literal - text += char; - elif (ord_ <= 8): - # Next $ord bytes are literal - text += data[offset:offset+ord_] # text .=substr($data,$offset,ord); - offset += ord_; - elif (ord_ <= 0x7f): - # Values from 0x09 through 0x7f are literal - text += char; - elif (ord_ <= 0xbf): - # Data is LZ77-compressed - - # From Wikipedia: - # "A length-distance pair is always encoded by a two-byte - # sequence. Of the 16 bits that make up these two bytes, - # 11 bits go to encoding the distance, 3 go to encoding - # the length, and the remaining two are used to make sure - # the decoder can identify the first byte as the beginning - # of such a two-byte sequence." 
- - offset += 1; - if (offset > len(data)): - print("WARNING: offset to LZ77 bits is outside of the data: %d" % offset); - return text; - - lz77, = struct.unpack('>H', data[offset-2:offset]) - - # Leftmost two bits are ID bits and need to be dropped - lz77 &= 0x3fff; - - # Length is rightmost 3 bits + 3 - lz77length = (lz77 & 0x0007) + 3; - - # Remaining 11 bits are offset - lz77offset = lz77 >> 3; - if (lz77offset < 1): - print("WARNING: LZ77 decompression offset is invalid!"); - return text; - - # Getting text from the offset is a little tricky, because - # in theory you can be referring to characters you haven't - # actually decompressed yet. You therefore have to check - # the reference one character at a time. - textlength = len(text); - for lz77pos in range(lz77length): # for($lz77pos = 0; $lz77pos < $lz77length; $lz77pos++) - textpos = textlength - lz77offset; - if (textpos < 0): - print("WARNING: LZ77 decompression reference is before"+ - " beginning of text! %x" % lz77); - return; - - text += text[textpos:textpos+1]; #text .= substr($text,$textpos,1); - textlength+=1; - else: - # 0xc0 - 0xff are single characters (XOR 0x80) preceded by - # a space - text += ' ' + chr(ord_ ^ 0x80); - return text; diff --git a/mobi/utils.py b/mobi/utils.py deleted file mode 100644 index 946f1e80f..000000000 --- a/mobi/utils.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -utils.py - -Created by Elliot Kroo on 2009-12-25. -Copyright (c) 2009 Elliot Kroo. All rights reserved. 
-""" - -import sys -import os -import unittest - - -def toDict(tuples): - resultsDict = {} - for field, value in tuples: - if len(field) > 0 and field[0] != "-": - resultsDict[field] = value - return resultsDict; diff --git a/payment/admin.py b/payment/admin.py index c96c46e86..65fe005ef 100644 --- a/payment/admin.py +++ b/payment/admin.py @@ -1,5 +1,5 @@ from django.contrib.admin import ModelAdmin, site -from .models import Transaction, PaymentResponse, Receiver +from .models import Transaction, PaymentResponse, Receiver, Account class TransactionAdmin(ModelAdmin): list_display = ('campaign', 'user', 'amount', 'status', 'error') @@ -11,8 +11,25 @@ class PaymentResponseAdmin(ModelAdmin): class ReceiverAdmin(ModelAdmin): ordering = ('email',) +def deactivate(modeladmin, request, queryset): + for obj in queryset: + obj.deactivate() + +class AccountAdmin(ModelAdmin): + search_fields = ('user__username', 'user__email',) + list_display = ('user', 'card_type', 'card_exp_year', 'status') + readonly_fields = ('user', 'card_type', 'card_last4', 'card_exp_month', 'card_exp_year', + 'date_created', 'date_modified', 'date_deactivated', 'status') + fields = readonly_fields + actions = [deactivate] + + def deactivate(self, request, queryset): + for obj in queryset: + obj.deactivate() + # payments site.register(Transaction, TransactionAdmin) site.register(PaymentResponse, PaymentResponseAdmin) site.register(Receiver, ReceiverAdmin) +site.register(Account, AccountAdmin) diff --git a/payment/baseprocessor.py b/payment/baseprocessor.py index 2c3bea66e..f3c1d2dfe 100644 --- a/payment/baseprocessor.py +++ b/payment/baseprocessor.py @@ -155,7 +155,7 @@ class Finish(BasePaymentRequest): ''' def __init__(self, transaction): - print "Finish" + print("Finish") class PaymentDetails(BasePaymentRequest): diff --git a/payment/management/commands/grant_user_credit.py b/payment/management/commands/grant_user_credit.py index dbfa4c553..7eed0048b 100644 --- 
a/payment/management/commands/grant_user_credit.py +++ b/payment/management/commands/grant_user_credit.py @@ -1,24 +1,33 @@ +from decimal import Decimal + from django.core.management.base import BaseCommand from django.contrib.auth.models import User +ACTIONS = ("debit", "redeem", "credit", "cash") class Command(BaseCommand): help = "grant (or debit or redeem) credit to a user. \ Usage: grant_user_credit \ amount is dollars or 'all' " - args = " " - def handle(self, username, amount, action="credit", *args, **kwargs): - if action not in ("debit", "redeem", "credit"): - print 'action should be in ("debit", "redeem", "credit")' + def add_arguments(self, parser): + parser.add_argument('username', type=str, help="user to credit") + parser.add_argument('amount', type=str, help="amount to credit") + parser.add_argument('action', type=str, help="credit/debit/redeem/cash") + + def handle(self, username, amount, action, **options): + if action not in ACTIONS: + self.stdout.write('action should be in %s' % str(ACTIONS)) return user = User.objects.get(username=username) if amount == 'all': amount = user.credit.available - if action in ("debit", "redeem" ): - amount = -int(amount) - elif action == "credit": - amount = int(amount) - notify = action != "redeem" - user.credit.add_to_balance(amount, notify=notify) - print "{}ed ${} from {}".format(action, amount, username) - print "{} now has a balance of {} credits".format(username, user.credit.balance) + if action == 'redeem': + user.credit.use_pledge(amount) + else: + amount = Decimal(amount) + if action in ("debit", "cash"): + amount = - amount + user.credit.add_to_balance(amount, notify=action != "cash") + self.stdout.write("{}ed ${} from {}".format(action, amount, username)) + self.stdout.write("{} now has a balance of {} credits".format( + username, user.credit.balance)) diff --git a/payment/management/commands/retotal.py b/payment/management/commands/retotal.py index dacf45748..599096df2 100644 --- 
a/payment/management/commands/retotal.py +++ b/payment/management/commands/retotal.py @@ -9,5 +9,5 @@ def handle(self, *args, **kwargs): campaigns = regluit.core.models.Campaign.objects.all() for c in campaigns: c.update_left() - print c.left + print(c.left) \ No newline at end of file diff --git a/payment/management/commands/test_stripe_charge.py b/payment/management/commands/test_stripe_charge.py index 2e7f3cbfd..8b5ad0e87 100644 --- a/payment/management/commands/test_stripe_charge.py +++ b/payment/management/commands/test_stripe_charge.py @@ -12,7 +12,7 @@ def handle(self, *args, **kwargs): sc = stripelib.StripeClient() card = stripelib.card(number="4242424242424242", exp_month="01", exp_year="2013", cvc="123") cust = sc.create_customer(card=card, description="William Shakespeare XIV (via test_stripe_charge)", email="bill.shakespeare@gmail.com") - print cust + print(cust) # let's charge RY $1.00 charge = sc.create_charge(D('1.00'), customer=cust.id, description="$1 TEST CHARGE for Will S. 
XIV") - print charge \ No newline at end of file + print(charge) \ No newline at end of file diff --git a/payment/management/commands/transaction_status.py b/payment/management/commands/transaction_status.py deleted file mode 100644 index cb7b73bca..000000000 --- a/payment/management/commands/transaction_status.py +++ /dev/null @@ -1,16 +0,0 @@ -from pprint import pprint - -from django.core.management.base import BaseCommand - -import regluit - -class Command(BaseCommand): - help = "show current status of transactions" - - def handle(self, *args, **kwargs): - transactions = regluit.payment.models.Transaction.objects.all() - for t in transactions: - pd = regluit.payment.paypal.PaymentDetails(t) - print pprint(pd.response) - print pd.compare() - \ No newline at end of file diff --git a/payment/manager.py b/payment/manager.py index bfdf2e36b..c3c55be5f 100644 --- a/payment/manager.py +++ b/payment/manager.py @@ -3,26 +3,21 @@ """ import logging import traceback -import urllib -import urlparse -import uuid +from urllib.parse import urlencode +from urllib.parse import urljoin from datetime import timedelta -from dateutil.relativedelta import relativedelta from decimal import Decimal as D -from xml.dom import minidom +from dateutil.relativedelta import relativedelta + +#django imports -""" -django imports -""" from django.conf import settings -from django.contrib.auth.models import User -from django.core.urlresolvers import reverse +from django.urls import reverse from django.utils.timezone import now -""" -regluit imports -""" +#regluit imports + from regluit.payment import credit from regluit.payment.models import Transaction, Receiver, PaymentResponse, Account from regluit.payment.parameters import * @@ -31,19 +26,20 @@ logger = logging.getLogger(__name__) def append_element(doc, parent, name, text): - + element = doc.createElement(name) parent.appendChild(element) text_node = doc.createTextNode(text) element.appendChild(text_node) - + return element -# at this point, 
there is no internal context and therefore, the methods of PaymentManager can be recast into static methods -class PaymentManager( object ): - +# at this point, there is no internal context and therefore, the methods of PaymentManager can be +# recast into static methods +class PaymentManager(object): + def processIPN(self, request, module): - + # Forward to our payment processor mod = __import__("regluit.payment." + module, fromlist=[str(module)]) return mod.Processor().ProcessIPN(request) @@ -52,39 +48,39 @@ def update_preapproval(self, transaction): """Update a transaction to hold the data from a PreapprovalDetails on that transaction""" t = transaction p = transaction.get_payment_class().PreapprovalDetails(t) - + preapproval_status = {'id':t.id, 'key':t.preapproval_key} - + if p.error() or not p.success(): - logger.info("Error retrieving preapproval details for transaction %d" % t.id) + logger.info("Error retrieving preapproval details for transaction %d", t.id) preapproval_status["error"] = "An error occurred while verifying this transaction, see server logs for details" else: - + # Check the transaction status if t.status != p.status: preapproval_status["status"] = {'ours':t.status, 'theirs':p.status} t.status = p.status t.local_status = p.local_status t.save() - + # check the currency code if t.currency != p.currency: preapproval_status["currency"] = {'ours':t.currency, 'theirs':p.currency} t.currency = p.currency t.save() - + # Check the amount if t.max_amount != D(p.amount): preapproval_status["amount"] = {'ours':t.max_amount, 'theirs':p.amount} t.max_amount = p.amount t.save() - + # Check approved if t.approved != p.approved: preapproval_status["approved"] = {'ours':t.approved, 'theirs':p.approved} t.approved = p.approved t.save() - + # In amazon FPS, we may not have a pay_key via the return URL, update here try: if t.pay_key != p.pay_key: @@ -94,112 +90,118 @@ def update_preapproval(self, transaction): except: # No problem, p.pay_key is not defined for 
paypal function blah = "blah" - - - return preapproval_status + + + return preapproval_status def update_payment(self, transaction): """Update a transaction to hold the data from a PaymentDetails on that transaction""" t = transaction payment_status = {'id':t.id} - + p = transaction.get_payment_class().PaymentDetails(t) - + if p.error() or not p.success(): - logger.info("Error retrieving payment details for transaction %d" % t.id) + logger.info("Error retrieving payment details for transaction %d", t.id) payment_status['error'] = "An error occurred while verifying this transaction, see server logs for details" else: - + # Check the transaction status if t.status != p.status: payment_status['status'] = {'ours': t.status, 'theirs': p.status} - + t.status = p.status t.local_status = p.local_status t.save() - + receivers_status = [] - + for r in p.transactions: # This is only supported for paypal at this time try: receiver = Receiver.objects.get(transaction=t, email=r['email']) - + receiver_status = {'email':r['email']} - + logger.info(r) logger.info(receiver) - - # Check for updates on each receiver's status. Note that unprocessed delayed chained payments + + # Check for updates on each receiver's status. 
+ # Note that unprocessed delayed chained payments # will not have a status code or txn id code if receiver.status != r['status']: receiver_status['status'] = {'ours': receiver.status, 'theirs': r['status']} receiver.status = r['status'] receiver.save() - + if receiver.txn_id != r['txn_id']: receiver_status['txn_id'] = {'ours':receiver.txn_id, 'theirs':r['txn_id']} - + receiver.txn_id = r['txn_id'] receiver.save() - + except: traceback.print_exc() - - if not set(["status","txn_id"]).isdisjoint(receiver_status.keys()): + + if not set(["status", "txn_id"]).isdisjoint(receiver_status.keys()): receivers_status.append(receiver_status) - - if len(receivers_status): + + if len(receivers_status) > 0: payment_status["receivers"] = receivers_status - + return payment_status - + def checkStatus(self, past_days=None, transactions=None): - + ''' Run through all pay transactions and verify that their current status is as we think. - - Allow for a list of transactions to be passed in or for the method to check on all transactions within the - given past_days - + + Allow for a list of transactions to be passed in or for the method to check on all + transactions within the given past_days + ''' - + DEFAULT_DAYS_TO_CHECK = 3 - + status = {'payments':[], 'preapprovals':[]} - - # look at all PAY transactions for stated number of past days; if past_days is not int, get all Transaction + + # look at all PAY transactions for stated number of past days; + # if past_days is not int, get all Transaction # only PAY transactions have date_payment not None - + if transactions is None: - + if past_days is None: past_days = DEFAULT_DAYS_TO_CHECK - + try: ref_date = now() - relativedelta(days=int(past_days)) payment_transactions = Transaction.objects.filter(date_payment__gte=ref_date) except: ref_date = now() payment_transactions = Transaction.objects.filter(date_payment__isnull=False) - + logger.info(payment_transactions) - + # Now look for preapprovals that have not been paid and check on their 
status - preapproval_transactions = Transaction.objects.filter(date_authorized__gte=ref_date, date_payment=None, type=PAYMENT_TYPE_AUTHORIZATION) - + preapproval_transactions = Transaction.objects.filter( + date_authorized__gte=ref_date, + date_payment=None, + type=PAYMENT_TYPE_AUTHORIZATION) + logger.info(preapproval_transactions) - + transactions = payment_transactions | preapproval_transactions - - + + for t in transactions: - + # deal with preapprovals if t.date_payment is None: preapproval_status = self.update_preapproval(t) - logger.info("transaction: {0}, preapproval_status: {1}".format(t, preapproval_status)) + logger.info( + "transaction: %s, preapproval_status: %s", t, preapproval_status) if not set(['status', 'currency', 'amount', 'approved']).isdisjoint(set(preapproval_status.keys())): status["preapprovals"].append(preapproval_status) # update payments @@ -207,132 +209,147 @@ def checkStatus(self, past_days=None, transactions=None): payment_status = self.update_payment(t) if not set(["status", "receivers"]).isdisjoint(payment_status.keys()): status["payments"].append(payment_status) - + # Clear out older, duplicate preapproval transactions cleared_list = [] for p in transactions: - + # pick out only the preapprovals if p.date_payment is None and p.type == PAYMENT_TYPE_AUTHORIZATION and p.status == TRANSACTION_STATUS_ACTIVE and p not in cleared_list: - - # keep only the newest transaction for this user and campaign - user_transactions_for_campaign = Transaction.objects.filter(user=p.user, status=TRANSACTION_STATUS_ACTIVE, campaign=p.campaign).order_by('-date_authorized') - + + # keep only the newest transaction for this user and campaign + user_transactions_for_campaign = Transaction.objects.filter( + user=p.user, + status=TRANSACTION_STATUS_ACTIVE, + campaign=p.campaign).order_by('-date_authorized') + if len(user_transactions_for_campaign) > 1: - logger.info("Found %d active transactions for campaign" % len(user_transactions_for_campaign)) - 
self.cancel_related_transaction(user_transactions_for_campaign[0], status=TRANSACTION_STATUS_ACTIVE, campaign=transactions[0].campaign) - + logger.info("Found %d active transactions for campaign", len(user_transactions_for_campaign)) + self.cancel_related_transaction( + user_transactions_for_campaign[0], + status=TRANSACTION_STATUS_ACTIVE, + campaign=transactions[0].campaign) + cleared_list.extend(user_transactions_for_campaign) - + # Note, we may need to call checkstatus again here - + return status - - - def run_query(self, transaction_list, summary=True, campaign_total=False, pledged=False, authorized=False, incomplete=False, completed=False, pending=False, error=False, failed=False, **kwargs): + + + def run_query(self, transaction_list, summary=True, campaign_total=False, pledged=False, + authorized=False, incomplete=False, completed=False, pending=False, error=False, + failed=False, **kwargs): ''' - Generic query handler for returning summary and transaction info, see query_user and query_campaign - + Generic query handler for returning summary and transaction info, + see query_user and query_campaign + campaign_total=True includes all payment types which should count towards campaign total - + ''' if campaign_total: # must double check when adding Paypal or other # return only ACTIVE transactions with approved=True - list = transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - approved=True).exclude(status=TRANSACTION_STATUS_CANCELED) - list = list | transaction_list.filter(type=PAYMENT_TYPE_INSTANT, - status=TRANSACTION_STATUS_COMPLETE) - else: - list = Transaction.objects.none() + tlist = transaction_list.filter( + type=PAYMENT_TYPE_AUTHORIZATION, + approved=True).exclude(status=TRANSACTION_STATUS_CANCELED) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_INSTANT, + status=TRANSACTION_STATUS_COMPLETE) + else: + tlist = Transaction.objects.none() if pledged: - list = list | transaction_list.filter(type=PAYMENT_TYPE_INSTANT, - 
status=TRANSACTION_STATUS_COMPLETE) - + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_INSTANT, + status=TRANSACTION_STATUS_COMPLETE) + if authorized: # return only ACTIVE transactions with approved=True - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_ACTIVE, - approved=True) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_ACTIVE, + approved=True) if incomplete: - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_INCOMPLETE) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_INCOMPLETE) if completed: - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_COMPLETE) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_COMPLETE) if pending: - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_PENDING) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_PENDING) if error: - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_ERROR) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_ERROR) if failed: - list = list | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, - status=TRANSACTION_STATUS_FAILED) + tlist = tlist | transaction_list.filter(type=PAYMENT_TYPE_AUTHORIZATION, + status=TRANSACTION_STATUS_FAILED) if summary: amount = D('0.00') - for t in list: + for t in tlist: amount += t.amount return amount - - else: - return list - def query_user(self, user, **kwargs): + return tlist + + def query_user(self, user, **kwargs): ''' query_user - + Returns either an amount or list of transactions for a user - + summary: if true, return a float of the total, if 
false, return a list of transactions - + return value: either a float summary or a list of transactions Note: this method appears to be unused. - - ''' - + + ''' + transactions = Transaction.objects.filter(user=user) - return self.run_query(transactions, **kwargs) - - def query_campaign(self, campaign, **kwargs ): + return self.run_query(transactions, **kwargs) + + def query_campaign(self, campaign, **kwargs): ''' query_campaign - + Returns either an amount or list of transactions for a campaign - + summary: if true, return a float of the total, if false, return a list of transactions - + return value: either a float summary or a list of transactions - - ''' - + + ''' + transactions = Transaction.objects.filter(campaign=campaign) return self.run_query(transactions, **kwargs) - - + + def execute_campaign(self, campaign): ''' execute_campaign - - attempts to execute all pending transactions for a campaign. - - return value: returns a list of transactions with the status of each receiver/transaction updated - - ''' - - # only allow active transactions to go through again, if there is an error, intervention is needed - transactions = Transaction.objects.filter(campaign=campaign, status=TRANSACTION_STATUS_ACTIVE) + + attempts to execute all pending transactions for a campaign. 
+ + return value: returns a list of transactions with the status + of each receiver/transaction updated + + ''' + + # only allow active transactions to go through again, + # if there is an error, intervention is needed + transactions = Transaction.objects.filter( + campaign=campaign, status=TRANSACTION_STATUS_ACTIVE) results = [] - + for t in transactions: - # + # # Currently receivers are only used for paypal, so it is OK to leave the paypal info here # - receiver_list = [{'email':settings.PAYPAL_GLUEJAR_EMAIL, 'amount':t.amount}, - {'email':campaign.paypal_receiver, 'amount':D(t.amount) * (D('1.00') - D(str(settings.GLUEJAR_COMMISSION)))}] - + receiver_list = [ + {'email':settings.PAYPAL_GLUEJAR_EMAIL, 'amount':t.amount}, + {'email':campaign.paypal_receiver, + 'amount':D(t.amount) * (D('1.00') - D(str(settings.GLUEJAR_COMMISSION)))} + ] + try: self.execute_transaction(t, receiver_list) except Exception as e: @@ -345,251 +362,257 @@ def execute_campaign(self, campaign): def finish_campaign(self, campaign): ''' finish_campaign - + attempts to execute all remaining payment to non-primary receivers This is currently only supported for paypal - - return value: returns a list of transactions with the status of each receiver/transaction updated - - ''' - - # QUESTION: to figure out which transactions are in a state in which the payment to the primary recipient is done but not to secondary recipient - # Consider two possibilities: status=IPN_PAY_STATUS_INCOMPLETE or execution = EXECUTE_TYPE_CHAINED_DELAYED + + return value: returns a list of transactions with the status of each + receiver/transaction updated + + ''' + + # QUESTION: to figure out which transactions are in a state in which the payment to the + # primary recipient is done but not to secondary recipient + # Consider two possibilities: status=IPN_PAY_STATUS_INCOMPLETE or + # execution = EXECUTE_TYPE_CHAINED_DELAYED # which one? 
Let's try the second one - # only allow incomplete transactions to go through again, if there is an error, intervention is needed - transactions = Transaction.objects.filter(campaign=campaign, execution=EXECUTE_TYPE_CHAINED_DELAYED) - - for t in transactions: - result = self.finish_transaction(t) + # only allow incomplete transactions to go through again, + # if there is an error, intervention is needed + transactions = Transaction.objects.filter(campaign=campaign, + execution=EXECUTE_TYPE_CHAINED_DELAYED) + + for t in transactions: + result = self.finish_transaction(t) # TO DO: update campaign status - - + + return transactions - + def cancel_campaign(self, campaign, reason="UNSUCCESSFUL CAMPAIGN"): ''' cancel_campaign - - attempts to cancel active preapprovals related to the campaign - - - return value: returns a list of transactions with the status of each receiver/transaction updated - - ''' - - transactions = Transaction.objects.filter(campaign=campaign, status=TRANSACTION_STATUS_ACTIVE) - - for t in transactions: + + attempts to cancel active preapprovals related to the campaign + + + return value: returns a list of transactions with the status + of each receiver/transaction updated + + ''' + + transactions = Transaction.objects.filter(campaign=campaign, + status=TRANSACTION_STATUS_ACTIVE) + + for t in transactions: result = self.cancel_transaction(t) if result: t.reason = reason t.save() - return transactions - + return transactions + def finish_transaction(self, transaction): ''' finish_transaction - + calls the paypal API to execute payment to non-primary receivers - + transaction: the transaction we want to complete - + ''' - + if transaction.execution != EXECUTE_TYPE_CHAINED_DELAYED: logger.error("FinishTransaction called with invalid execution type") return False - + # mark this transaction as executed transaction.date_executed = now() transaction.save() - - p = transaction.get_payment_class().Finish(transaction) - + + p = 
transaction.get_payment_class().Finish(transaction) + # Create a response for this envelope = p.envelope() - + if envelope: correlation = p.correlation_id() timestamp = p.timestamp() - + r = PaymentResponse.objects.create(api=p.url, - correlation_id = correlation, - timestamp = timestamp, - info = p.raw_response, - transaction=transaction) - + correlation_id=correlation, + timestamp=timestamp, + info=p.raw_response, + transaction=transaction) + if p.success() and not p.error(): logger.info("finish_transaction Success") return True - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("finish_transaction error " + p.error_string()) - return False - + + transaction.error = p.error_string() + transaction.save() + logger.info("finish_transaction error " + p.error_string()) + return False + def execute_transaction(self, transaction, receiver_list): ''' execute_transaction - + executes a single pending transaction. - + transaction: the transaction object to execute receiver_list: a list of receivers for the transaction, in this format: - + [ - {'email':'email-1', 'amount':amount1}, + {'email':'email-1', 'amount':amount1}, {'email':'email-2', 'amount':amount2} ] - - return value: a bool indicating the success or failure of the process. Please check the transaction status + + return value: a bool indicating the success or failure of the process. 
+ Please check the transaction status after the IPN has completed for full information - - ''' - + + ''' + if len(transaction.receiver_set.all()) > 0: # we are re-submitting a transaction, wipe the old receiver list transaction.receiver_set.all().delete() - + transaction.create_receivers(receiver_list) - + # Mark as payment attempted so we will poll this periodically for status changes transaction.set_payment() - + p = transaction.get_payment_class().Execute(transaction) - + # Create a response for this envelope = p.envelope() - + if envelope: - + correlation = p.correlation_id() timestamp = p.timestamp() - + r = PaymentResponse.objects.create(api=p.api(), - correlation_id = correlation, - timestamp = timestamp, - info = p.raw_response, - transaction=transaction) - + correlation_id=correlation, + timestamp=timestamp, + info=p.raw_response, + transaction=transaction) + # We will update our transaction status when we receive the IPN if p.success() and not p.error(): transaction.pay_key = p.key() transaction.save() return True - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("execute_transaction Error: " + p.error_string()) - return False - + + transaction.error = p.error_string() + transaction.save() + logger.info("execute_transaction Error: %s", p.error_string()) + return False + def cancel_transaction(self, transaction): ''' cancel - + cancels a pre-approved transaction - + return value: True if successful, false otherwise - ''' - + ''' + # does this transaction explicity require preapprovals? 
requires_explicit_preapprovals = transaction.get_payment_class().requires_explicit_preapprovals - + if requires_explicit_preapprovals: - - p = transaction.get_payment_class().CancelPreapproval(transaction) - + + p = transaction.get_payment_class().CancelPreapproval(transaction) + # Create a response for this envelope = p.envelope() - + if envelope: - + correlation = p.correlation_id() timestamp = p.timestamp() - + r = PaymentResponse.objects.create(api=p.url, - correlation_id = correlation, - timestamp = timestamp, - info = p.raw_response, - transaction=transaction) - + correlation_id=correlation, + timestamp=timestamp, + info=p.raw_response, + transaction=transaction) + if p.success() and not p.error(): - logger.info("Cancel Transaction " + str(transaction.id) + " Completed") + logger.info("Cancel Transaction %s Completed", transaction.id) return True - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("Cancel Transaction " + str(transaction.id) + " Failed with error: " + p.error_string()) - return False - - else: - - # if no explicit preapproval required, we just have to mark the transaction as cancelled. - transaction.status = TRANSACTION_STATUS_CANCELED + + transaction.error = p.error_string() transaction.save() - return True - - def authorize(self, transaction, expiry= None, return_url=None, paymentReason="unglue.it Pledge", modification=False): + logger.info("Cancel Transaction %s Failed with error: %s", transaction.id, p.error_string()) + return False + + # if no explicit preapproval required, we just have to mark the transaction as cancelled. 
+ transaction.status = TRANSACTION_STATUS_CANCELED + transaction.save() + return True + + def authorize(self, transaction, expiry=None, return_url=None, + paymentReason="unglue.it Pledge", modification=False): ''' authorize - + authorizes a set amount of money to be collected at a later date - + return_url: url to redirect supporter to after a successful transaction paymentReason: a memo line that will show up in the unglue.it accounting modification: whether this authorize call is part of a modification of an existing pledge - - return value: a tuple of the new transaction object and a re-direct url. If the process fails, - the redirect url will be None - - ''' - + + return value: a tuple of the new transaction object and a re-direct url. + If the process fails, the redirect url will be None + + ''' + if transaction.host == PAYMENT_HOST_NONE: #TODO send user to select a payment processor -- for now, set to a system setting - transaction.host = settings.PAYMENT_PROCESSOR - + transaction.host = settings.PAYMENT_PROCESSOR + # we might want to not allow for a return_url to be passed in but calculated # here because we have immediate access to the Transaction object. 
- - + + if return_url is None: - return_path = "{0}?{1}".format(reverse('pledge_complete'), - urllib.urlencode({'tid':transaction.id})) - return_url = urlparse.urljoin(settings.BASE_URL_SECURE, return_path) - - p = transaction.get_payment_class().Preapproval(transaction, transaction.max_amount, expiry, return_url=return_url, paymentReason=paymentReason) - + return_path = "{0}?{1}".format(reverse('pledge_complete'), + urlencode({'tid':transaction.id})) + return_url = urljoin(settings.BASE_URL_SECURE, return_path) + + p = transaction.get_payment_class().Preapproval( + transaction, transaction.max_amount, + expiry, return_url=return_url, paymentReason=paymentReason) + # Create a response for this envelope = p.envelope() - - if envelope: + + if envelope: r = PaymentResponse.objects.create(api=p.url, - correlation_id = p.correlation_id(), - timestamp = p.timestamp(), - info = p.raw_response, - transaction=transaction) - + correlation_id=p.correlation_id(), + timestamp=p.timestamp(), + info=p.raw_response, + transaction=transaction) + if p.success() and not p.error(): transaction.preapproval_key = p.key() transaction.save() - + # it make sense for the payment processor library to calculate next_url when # user is redirected there. But if no redirection is required, send user # straight on to the return_url url = p.next_url() - + if url is None: url = return_url - + logger.info("Authorize Success: " + url if url is not None else '') - + # modification and initial pledge use different notification templates -- # decide which to send # we need to fire notifications at the first point at which we are sure @@ -598,87 +621,90 @@ def authorize(self, transaction, expiry= None, return_url=None, paymentReason=" # that for whatever reason fail. will need other housekeeping to handle those. # sadly this point is not yet late enough in the process -- needs to be moved # until after we are certain. 
- + if not modification: - # BUGBUG: + # BUGBUG: # send the notice here for now - # this is actually premature since we're only about to send the user off to the payment system to - # authorize a charge + # this is actually premature since we're only about to send the user off to the + # payment system to authorize a charge pledge_created.send(sender=self, transaction=transaction) - + return transaction, url - - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("Authorize Error: " + p.error_string()) - return transaction, None - def charge(self, transaction, return_url=None, paymentReason="unglue.it Purchase", token = None): + transaction.error = p.error_string() + transaction.save() + logger.info("Authorize Error: %s", p.error_string()) + return transaction, None + + def charge(self, transaction, return_url=None, paymentReason="unglue.it Purchase", token=None): ''' charge - - immediately attempt to collect on transaction - + + immediately attempt to collect on transaction + return_url: url to redirect supporter to after a successful transaction paymentReason: a memo line that will show up in our stripe accounting - - return value: a tuple of the new transaction object and a re-direct url. If the process fails, - the redirect url will be None - + + return value: a tuple of the new transaction object and a re-direct url. + If the process fails, the redirect url will be None + ''' if transaction.host == PAYMENT_HOST_NONE: #TODO send user to select a payment processor -- for now, set to a system setting - transaction.host = settings.PAYMENT_PROCESSOR - + transaction.host = settings.PAYMENT_PROCESSOR + # we might want to not allow for a return_url to be passed in but calculated # here because we have immediate access to the Transaction object. 
charge_amount = transaction.needed_amount - if transaction.credit_amount > 0 : - success = credit.pay_transaction(transaction, transaction.user, transaction.campaign.user_to_pay, transaction.credit_amount) + if transaction.credit_amount > 0: + success = credit.pay_transaction( + transaction, transaction.user, + transaction.campaign.user_to_pay, + transaction.credit_amount + ) if not success: #shouldn't happen - logger.error('could not use credit for transaction %s' % transaction.id) - charge_amount =transaction.max_amount - p = transaction.get_payment_class().Pay(transaction, amount=charge_amount, return_url=return_url, paymentReason=paymentReason, token=token) - - + logger.error('could not use credit for transaction %s', transaction.id) + charge_amount = transaction.max_amount + p = transaction.get_payment_class().Pay( + transaction, amount=charge_amount, + return_url=return_url, paymentReason=paymentReason, token=token) + + if p.success() and not p.error(): transaction.preapproval_key = p.key() transaction.execution = EXECUTE_TYPE_INSTANT transaction.set_executed() # also does the save - + # it make sense for the payment processor library to calculate next_url when # user is redirected there. 
But if no redirection is required, send user # straight on to the return_url url = p.next_url() - + if url is None: url = return_url - + logger.info("Pay Success: " + url if url is not None else '') - + return transaction, url - - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("Pay Error: " + p.error_string()) - return transaction, None + + transaction.error = p.error_string() + transaction.save() + logger.info("Pay Error: %s", p.error_string()) + return transaction, None + - - def process_transaction(self, currency, amount, host=PAYMENT_HOST_NONE, campaign=None, - user=None, return_url=None, paymentReason="unglue.it Pledge", pledge_extra=None, - donation=False, modification=False): + def process_transaction( + self, currency, amount, host=PAYMENT_HOST_NONE, campaign=None, + user=None, return_url=None, paymentReason="unglue.it Pledge", pledge_extra=None, + donation=False, modification=False): ''' process - - saves and processes a proposed transaction; decides if the transaction should be processed + + saves and processes a proposed transaction; decides if the transaction should be processed immediately. - + currency: a 3-letter currency code, i.e. USD amount: the amount to authorize host: the name of the processing module; if none, send user back to decide! @@ -688,11 +714,11 @@ def process_transaction(self, currency, amount, host=PAYMENT_HOST_NONE, campaig paymentReason: a memo line that will show up in the Payer's Amazon (and Paypal?) account modification: whether this authorize call is part of a modification of an existing pledge pledge_extra: extra pledge stuff - - return value: a tuple of the new transaction object and a re-direct url. + + return value: a tuple of the new transaction object and a re-direct url. 
If the process fails, the redirect url will be None donation: transaction is a donation - ''' + ''' # set the expiry date based on the campaign deadline if campaign and campaign.deadline: expiry = campaign.deadline + timedelta(days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN) @@ -700,28 +726,29 @@ def process_transaction(self, currency, amount, host=PAYMENT_HOST_NONE, campaig expiry = now() + timedelta(days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN) t = Transaction.create( - amount=0, - host = host, - max_amount=amount, - currency=currency, - campaign=campaign, - user=user, - pledge_extra=pledge_extra, - donation=donation, + amount=0, + host=host, + max_amount=amount, + currency=currency, + campaign=campaign, + user=user, + pledge_extra=pledge_extra, + donation=donation, + reason=paymentReason, ) t.save() # does user have enough credit to transact now? - if user.is_authenticated() and user.credit.available >= amount : + if user.is_authenticated and user.credit.available >= amount: # YES! 
- return_path = "{0}?{1}".format(reverse('pledge_complete'), - urllib.urlencode({'tid':t.id})) - return_url = urlparse.urljoin(settings.BASE_URL_SECURE, return_path) - if campaign.is_pledge() : - success = credit.pledge_transaction(t,user,amount) + return_path = "{0}?{1}".format(reverse('pledge_complete'), + urlencode({'tid':t.id})) + return_url = urljoin(settings.BASE_URL_SECURE, return_path) + if campaign.is_pledge(): + success = credit.pledge_transaction(t, user, amount) if success: pledge_created.send(sender=self, transaction=t) else: - success = credit.pay_transaction(t,user,t.campaign.user_to_pay, amount) + success = credit.pay_transaction(t, user, t.campaign.user_to_pay, amount) if success: t.amount = amount t.host = PAYMENT_HOST_CREDIT @@ -732,39 +759,40 @@ def process_transaction(self, currency, amount, host=PAYMENT_HOST_NONE, campaig transaction_charged.send(sender=self, transaction=t) if success: return t, return_url - else: - # shouldn't happen - logger.error('could not use credit for transaction %s' % t.id) - - + + # shouldn't happen + logger.error('could not use credit for transaction %s', t.id) + + # send user to choose payment path return t, reverse('fund', args=[t.id]) - - def cancel_related_transaction(self, transaction, status=TRANSACTION_STATUS_ACTIVE, campaign=None): + + def cancel_related_transaction(self, transaction, + status=TRANSACTION_STATUS_ACTIVE, campaign=None): ''' Cancels any other similar status transactions for the same campaign. 
Used with modify code - + Returns the number of transactions successfully canceled ''' - + related_transactions = Transaction.objects.filter(status=status, user=transaction.user) - + if len(related_transactions) == 0: return 0 - + if campaign: related_transactions = related_transactions.filter(campaign=campaign) - + canceled = 0 - + for t in related_transactions: - + if t.id == transaction.id: # keep our transaction continue - - if self.cancel_transaction(t): + + if self.cancel_transaction(t): canceled = canceled + 1 # send notice about modification of transaction if transaction.amount > t.amount: @@ -776,119 +804,129 @@ def cancel_related_transaction(self, transaction, status=TRANSACTION_STATUS_ACTI else: # we shouldn't expect any case in which this happens up_or_down = None - + pledge_modified.send(sender=self, transaction=transaction, up_or_down=up_or_down) else: - logger.error("Failed to cancel transaction {0} for related transaction {1} ".format(t, transaction)) - + logger.error( + "Failed to cancel transaction %s for related transaction %s ", t, transaction) + return canceled - + def modify_transaction(self, transaction, amount, expiry=None, pledge_extra=None, return_url=None, nevermind_url=None, paymentReason=None): ''' modify - - Modifies a transaction. - 2 main situations: if the new amount is less than max_amount, no need to go out to Stripe again + + Modifies a transaction. + 2 main situations: if the new amount is less than max_amount, + no need to go out to Stripe again if new amount is greater than max_amount...need to go out and get new approval. to start with, we can use the standard pledge_complete, pledge_cancel machinery might have to modify the pledge_complete, pledge_cancel because the messages are going to be different because we're modifying a pledge rather than a new one. 
- + amount: the new amount expiry: the new expiration date, or if none the current expiration date will be used return_url: the return URL after the preapproval(if needed) paymentReason: a memo line that will show up in the Payer's Amazon (and Paypal?) account - - return value: True if successful, False otherwise. An optional second parameter for the forward URL if a new authorhization is needed + + return value: True if successful, False otherwise. An optional second parameter for the + forward URL if a new authorhization is needed ''' - - logger.info("transaction.id: {0}, amount:{1}".format(transaction.id, amount)) + + logger.info("transaction.id: %s, amount: %s", transaction.id, amount) if amount < transaction.amount: up_or_down = "decreased" elif amount > transaction.amount: - up_or_down = "increased" + up_or_down = "increased" else: - up_or_down = "modified" + up_or_down = "modified" - # if expiry is None, use the existing value + # if expiry is None, use the existing value if expiry is None: expiry = transaction.date_expired - + # does this transaction explicity require preapprovals? - + requires_explicit_preapprovals = transaction.get_payment_class().requires_explicit_preapprovals - + if transaction.type != PAYMENT_TYPE_AUTHORIZATION: logger.info("Error, attempt to modify an invalid transaction type") return False, None - - # Can only modify an active, pending transaction. If it is completed, we need to do a refund. If it is incomplete, - # then an IPN may be pending and we cannot touch it + + # Can only modify an active, pending transaction. If it is completed, + # we need to do a refund. If it is incomplete, + # then an IPN may be pending and we cannot touch it if transaction.status != TRANSACTION_STATUS_ACTIVE: logger.info("Error, attempt to modify a transaction that is not active") return False, None - + if transaction.host == PAYMENT_HOST_CREDIT: # does user have enough credit to pledge now? 
- if transaction.user.credit.available >= amount-transaction.amount : + if transaction.user.credit.available >= amount-transaction.amount: # YES! transaction.set_pledge_extra(pledge_extra) - credit.pledge_transaction(transaction,transaction.user,amount) - return_path = "{0}?{1}".format(reverse('pledge_complete'), - urllib.urlencode({'tid':transaction.id})) - return_url = urlparse.urljoin(settings.BASE_URL_SECURE, return_path) - - logger.info("Updated amount of transaction to %f" % amount) - pledge_modified.send(sender=self, transaction=transaction,up_or_down=up_or_down) + credit.pledge_transaction(transaction, transaction.user, amount) + return_path = "{0}?{1}".format(reverse('pledge_complete'), + urlencode({'tid':transaction.id})) + return_url = urljoin(settings.BASE_URL_SECURE, return_path) + + logger.info("Updated amount of transaction to %f", amount) + pledge_modified.send(sender=self, transaction=transaction, up_or_down=up_or_down) return transaction, return_url - else: - # cancel old transaction, send user to choose new payment path - # set the expiry date based on the campaign deadline - expiry = transaction.deadline_or_now + timedelta( days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN ) - t = Transaction.create(amount=0, - max_amount=amount, - currency=transaction.currency, - status=TRANSACTION_STATUS_MODIFIED, - campaign=transaction.campaign, - user=transaction.user, - pledge_extra=pledge_extra - ) - t.save() - credit.Processor.CancelPreapproval(transaction) - return t, reverse('fund_%s'%campaign.type, args=[t.id]) + + # cancel old transaction, send user to choose new payment path + # set the expiry date based on the campaign deadline + expiry = transaction.deadline_or_now + timedelta( + days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN) + t = Transaction.create( + amount=0, + max_amount=amount, + currency=transaction.currency, + status=TRANSACTION_STATUS_MODIFIED, + campaign=transaction.campaign, + user=transaction.user, + pledge_extra=pledge_extra + ) + 
t.save() + credit.Processor.CancelPreapproval(transaction) + return t, reverse('fund', args=[t.id]) elif requires_explicit_preapprovals and (amount > transaction.max_amount or expiry != transaction.date_expired): # set the expiry date based on the campaign deadline - expiry = transaction.deadline_or_now + timedelta( days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN ) - + expiry = transaction.deadline_or_now + timedelta( + days=settings.PREAPPROVAL_PERIOD_AFTER_CAMPAIGN) + # Start a new transaction for the new amount - t = Transaction.create(amount=amount, - max_amount=amount, - host=transaction.host, - currency=transaction.currency, - status=TRANSACTION_STATUS_CREATED, - campaign=transaction.campaign, - user=transaction.user, - pledge_extra=pledge_extra - ) + t = Transaction.create( + amount=amount, + max_amount=amount, + host=transaction.host, + currency=transaction.currency, + status=TRANSACTION_STATUS_CREATED, + campaign=transaction.campaign, + user=transaction.user, + pledge_extra=pledge_extra + ) t.save() - t, url = self.authorize(transaction, - expiry=expiry if expiry else transaction.date_expired, - return_url=return_url, - paymentReason=paymentReason, - modification=True - ) - + t, url = self.authorize( + transaction, + expiry=expiry if expiry else transaction.date_expired, + return_url=return_url, + paymentReason=paymentReason, + modification=True + ) + if t and url: # Need to re-direct to approve the transaction - logger.info("New authorization needed, redirection to url %s" % url) - - # Do not cancel the transaction here, wait until we get confirmation that the transaction is complete + logger.info("New authorization needed, redirection to url %s", url) + + # Do not cancel the transaction here, wait until we get confirmation that + # the transaction is complete # then cancel all other active transactions for this campaign - #self.cancel_transaction(transaction) + #self.cancel_transaction(transaction) # while it would seem to make sense to send a pledge 
notification change here # if we do, we will also send notifications when we initiate but do not @@ -901,14 +939,15 @@ def modify_transaction(self, transaction, amount, expiry=None, pledge_extra=None # should we send a pledge_modified signal with state="failed" and a # corresponding notification to the user? that would go here. return False, None - - elif (requires_explicit_preapprovals and amount <= transaction.max_amount) or (not requires_explicit_preapprovals): + + elif (requires_explicit_preapprovals and amount <= transaction.max_amount) or \ + (not requires_explicit_preapprovals): # Update transaction but leave the preapproval alone transaction.amount = amount transaction.set_pledge_extra(pledge_extra) - + transaction.save() - logger.info("Updated amount of transaction to %f" % amount) + logger.info("Updated amount of transaction to %f", amount) # when modifying pledges happens immediately and only within our # db, we don't have to wait until we hear back to be assured of # success; send the notification immediately @@ -917,62 +956,60 @@ def modify_transaction(self, transaction, amount, expiry=None, pledge_extra=None else: # this shouldn't happen return False, None - - + + def refund_transaction(self, transaction): ''' refund - - Refunds a transaction. The money for the transaction may have gone to a number of places. We can only - refund money that is in our account - + + Refunds a transaction. The money for the transaction may have gone to a number of places. + We can only refund money that is in our account + return value: True if successful, false otherwise - ''' - - # First check if a payment has been made. It is possible that some of the receivers may be incomplete + ''' + + # First check if a payment has been made. 
+ # It is possible that some of the receivers may be incomplete # We need to verify that the refund API will cancel these if transaction.status != TRANSACTION_STATUS_COMPLETE: logger.info("Refund Transaction failed, invalid transaction status") return False - - p = transaction.get_payment_class().RefundPayment(transaction) - + + p = transaction.get_payment_class().RefundPayment(transaction) + # Create a response for this envelope = p.envelope() - + if envelope: - correlation = p.correlation_id() timestamp = p.timestamp() - + r = PaymentResponse.objects.create(api=p.url, - correlation_id = correlation, - timestamp = timestamp, - info = p.raw_response, - transaction=transaction) - + correlation_id=correlation, + timestamp=timestamp, + info=p.raw_response, + transaction=transaction) + if p.success() and not p.error(): - logger.info("Refund Transaction " + str(transaction.id) + " Completed") + logger.info("Refund Transaction %s Completed", transaction.id) return True - - else: - transaction.error = p.error_string() - transaction.save() - logger.info("Refund Transaction " + str(transaction.id) + " Failed with error: " + p.error_string()) - return False - + + transaction.error = p.error_string() + transaction.save() + logger.info("Refund Transaction %s Failed with error: %s", + transaction.id, p.error_string()) + return False + def make_account(self, user=None, host=None, token=None): """delegate to a specific payment module the task of creating a payment account""" - + mod = __import__("regluit.payment." 
+ host, fromlist=[host]) return mod.Processor().make_account(user=user, token=token) - + def retrieve_accounts(self, user, host, include_deactivated=False): """return any accounts that match user, host -- only active ones by default""" - + if include_deactivated: return Account.objects.filter(user=user, host=host) - else: - return Account.objects.filter(user=user, host=host, date_deactivated__isnull=True) - + return Account.objects.filter(user=user, host=host, date_deactivated__isnull=True) diff --git a/payment/migrations/0001_initial.py b/payment/migrations/0001_initial.py index d47d35275..6137c7e20 100644 --- a/payment/migrations/0001_initial.py +++ b/payment/migrations/0001_initial.py @@ -31,7 +31,7 @@ class Migration(migrations.Migration): ('date_modified', models.DateTimeField(auto_now=True)), ('date_deactivated', models.DateTimeField(null=True)), ('status', models.CharField(default=b'ACTIVE', max_length=11, choices=[(b'ACTIVE', b'ACTIVE'), (b'DEACTIVATED', b'DEACTIVATED'), (b'EXPIRED', b'EXPIRED'), (b'EXPIRING', b'EXPIRING'), (b'ERROR', b'ERROR')])), - ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)), + ('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.CreateModel( @@ -41,7 +41,7 @@ class Migration(migrations.Migration): ('balance', models.DecimalField(default=Decimal('0.00'), max_digits=14, decimal_places=2)), ('pledged', models.DecimalField(default=Decimal('0.00'), max_digits=14, decimal_places=2)), ('last_activity', models.DateTimeField(auto_now=True)), - ('user', models.OneToOneField(related_name='credit', to=settings.AUTH_USER_MODEL)), + ('user', models.OneToOneField(on_delete=models.CASCADE, related_name='credit', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( @@ -52,7 +52,7 @@ class Migration(migrations.Migration): ('timestamp', models.DateTimeField(auto_now=True)), ('action', models.CharField(max_length=16)), ('sent', models.IntegerField(null=True)), - 
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)), + ('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.CreateModel( @@ -116,20 +116,20 @@ class Migration(migrations.Migration): ('date_expired', models.DateTimeField(null=True)), ('extra', jsonfield.fields.JSONField(default={}, null=True)), ('anonymous', models.BooleanField(default=False)), - ('campaign', models.ForeignKey(to='core.Campaign', null=True)), - ('offer', models.ForeignKey(to='core.Offer', null=True)), - ('premium', models.ForeignKey(to='core.Premium', null=True)), - ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)), + ('campaign', models.ForeignKey(on_delete=models.CASCADE, to='core.Campaign', null=True)), + ('offer', models.ForeignKey(on_delete=models.CASCADE, to='core.Offer', null=True)), + ('premium', models.ForeignKey(on_delete=models.CASCADE, to='core.Premium', null=True)), + ('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='receiver', name='transaction', - field=models.ForeignKey(to='payment.Transaction'), + field=models.ForeignKey(on_delete=models.CASCADE, to='payment.Transaction'), ), migrations.AddField( model_name='paymentresponse', name='transaction', - field=models.ForeignKey(to='payment.Transaction'), + field=models.ForeignKey(on_delete=models.CASCADE, to='payment.Transaction'), ), ] diff --git a/payment/migrations/0003_auto_20200214_1347.py b/payment/migrations/0003_auto_20200214_1347.py new file mode 100644 index 000000000..5c4e91a9a --- /dev/null +++ b/payment/migrations/0003_auto_20200214_1347.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.28 on 2020-02-14 13:47 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('payment', '0002_transaction_donation'), + ] + + operations = [ + 
migrations.AlterField( + model_name='account', + name='host', + field=models.CharField(default='none', max_length=32), + ), + migrations.AlterField( + model_name='account', + name='status', + field=models.CharField(choices=[('ACTIVE', 'ACTIVE'), ('DEACTIVATED', 'DEACTIVATED'), ('EXPIRED', 'EXPIRED'), ('EXPIRING', 'EXPIRING'), ('ERROR', 'ERROR')], default='ACTIVE', max_length=11), + ), + migrations.AlterField( + model_name='transaction', + name='currency', + field=models.CharField(default='USD', max_length=10, null=True), + ), + migrations.AlterField( + model_name='transaction', + name='host', + field=models.CharField(default='none', max_length=32), + ), + migrations.AlterField( + model_name='transaction', + name='local_status', + field=models.CharField(default='NONE', max_length=32, null=True), + ), + migrations.AlterField( + model_name='transaction', + name='status', + field=models.CharField(default='None', max_length=32), + ), + ] diff --git a/payment/models.py b/payment/models.py index 3cf6a8f5a..5af6c54e4 100644 --- a/payment/models.py +++ b/payment/models.py @@ -28,18 +28,16 @@ ## regluit imports from regluit.payment.parameters import ( - PAYMENT_TYPE_NONE, - PAYMENT_TYPE_AUTHORIZATION, - - PAYMENT_HOST_NONE, - - PAYMENT_HOST_CREDIT, - EXECUTE_TYPE_NONE, - TRANSACTION_STATUS_NONE, + FUNDS, + PAYMENT_HOST_CREDIT, + PAYMENT_HOST_NONE, + PAYMENT_TYPE_AUTHORIZATION, + PAYMENT_TYPE_NONE, TRANSACTION_STATUS_ACTIVE, TRANSACTION_STATUS_ERROR, TRANSACTION_STATUS_FAILED, + TRANSACTION_STATUS_NONE, ) from regluit.payment.signals import credit_balance_added, pledge_created @@ -97,7 +95,7 @@ class Transaction(models.Model): # error message from a transaction error = models.CharField(max_length=256, null=True) - # IPN.reason_code + # reason_code - originally an IPN thing, now used for fund identification reason = models.CharField(max_length=64, null=True) # creation and last modified timestamps @@ -115,10 +113,10 @@ class Transaction(models.Model): date_expired = 
models.DateTimeField(null=True) # associated User, Campaign, and Premium for this Transaction - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) - campaign = models.ForeignKey('core.Campaign', null=True) - premium = models.ForeignKey('core.Premium', null=True) - offer = models.ForeignKey('core.Offer', null=True) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) + campaign = models.ForeignKey('core.Campaign', on_delete=models.CASCADE, null=True) + premium = models.ForeignKey('core.Premium', on_delete=models.CASCADE, null=True) + offer = models.ForeignKey('core.Offer', on_delete=models.CASCADE, null=True) extra = JSONField(null=True, default={}) # whether the user wants to be not listed publicly @@ -145,7 +143,7 @@ def deadline_or_now(self): @property def needed_amount(self): - if self.user is None or self.user.is_anonymous(): + if self.user is None or self.user.is_anonymous: return self.max_amount if self.user.credit.available >= self.max_amount: return 0 @@ -153,7 +151,7 @@ def needed_amount(self): @property def credit_amount(self): - if self.user is None or self.user.is_anonymous(): + if self.user is None or self.user.is_anonymous: return 0 if self.user.credit.available >= self.max_amount: return self.max_amount @@ -168,7 +166,7 @@ def save(self, *args, **kwargs): self.secret = str(uuid.uuid1()) super(Transaction, self).save(*args, **kwargs) # Call the "real" save() method. 
- def __unicode__(self): + def __str__(self): return u"-- Transaction:\n \tstatus: %s\n \t amount: %s\n \terror: %s\n" % (self.status, str(self.amount), self.error) def create_receivers(self, receiver_list): @@ -232,8 +230,8 @@ class pe: @classmethod def create(cls, amount=0.00, host=PAYMENT_HOST_NONE, max_amount=0.00, currency='USD', status=TRANSACTION_STATUS_NONE, campaign=None, user=None, pledge_extra=None, - donation=False): - if user and user.is_anonymous(): + donation=False, reason=''): + if user and user.is_anonymous: user = None t = cls.objects.create( amount=amount, @@ -244,11 +242,16 @@ def create(cls, amount=0.00, host=PAYMENT_HOST_NONE, max_amount=0.00, currency=' campaign=campaign, user=user, donation=donation, + reason=reason, ) if pledge_extra: t.set_pledge_extra(pledge_extra) return t + def fund(self): + val = "general" if not self.reason else self.reason + return FUNDS.get(self.reason, {"name": ""}) + class PaymentResponse(models.Model): # The API used api = models.CharField(max_length=64, null=False) @@ -265,13 +268,13 @@ class PaymentResponse(models.Model): # local status specific to the api call status = models.CharField(max_length=32, null=True) - transaction = models.ForeignKey(Transaction, null=False) + transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE, null=False) - def __unicode__(self): + def __str__(self): return u"PaymentResponse -- api: {0} correlation_id: {1} transaction: {2}".format( self.api, self.correlation_id, - unicode(self.transaction) + str(self.transaction) ) @@ -287,18 +290,18 @@ class Receiver(models.Model): reason = models.CharField(max_length=64) primary = models.BooleanField(default=True) txn_id = models.CharField(max_length=64) - transaction = models.ForeignKey(Transaction) + transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE,) - def __unicode__(self): + def __str__(self): return u"Receiver -- email: {0} status: {1} transaction: {2}".format( self.email, self.status, - 
unicode(self.transaction) + str(self.transaction) ) class CreditLog(models.Model): # a write only record of Unglue.it Credit Transactions - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) amount = models.DecimalField(default=Decimal('0.00'), max_digits=14, decimal_places=2) # max 999,999,999,999.99 timestamp = models.DateTimeField(auto_now=True) action = models.CharField(max_length=16) @@ -306,7 +309,7 @@ class CreditLog(models.Model): sent = models.IntegerField(null=True) class Credit(models.Model): - user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='credit') + user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='credit') balance = models.DecimalField(default=Decimal('0.00'), max_digits=14, decimal_places=2) # max 999,999,999,999.99 pledged = models.DecimalField(default=Decimal('0.00'), max_digits=14, decimal_places=2) # max 999,999,999,999.99 last_activity = models.DateTimeField(auto_now=True) @@ -421,7 +424,7 @@ class Account(models.Model): date_deactivated = models.DateTimeField(null=True) # associated User if any - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) + user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) # status variable status = models.CharField(max_length=11, choices=STATUS_CHOICES, null=False, default='ACTIVE') diff --git a/payment/parameters.py b/payment/parameters.py index e76790838..64226289c 100644 --- a/payment/parameters.py +++ b/payment/parameters.py @@ -53,3 +53,8 @@ TRANSACTION_STATUS_WRITTEN_OFF = 'Written-Off' COMPANY_TITLE = 'Free Ebook Foundation' + +FUNDS = { + "general": {"name": "General Fund"}, + "monographs": {"name": "Monographs Fund"}, +} \ No newline at end of file diff --git a/payment/sales_credit.py b/payment/sales_credit.py index 3be5ca88a..b7dbd4b00 100644 --- a/payment/sales_credit.py +++ 
b/payment/sales_credit.py @@ -3,7 +3,7 @@ import datetime from decimal import Decimal as D #import Tkinter, tkFileDialog -from StringIO import StringIO +from io import StringIO from regluit.core.models import Campaign #root = Tkinter.Tk() @@ -24,11 +24,10 @@ campaign_id = description['tc.id'] try: campaign = Campaign.objects.get(id = campaign_id) - #print campaign.name except Campaign.DoesNotExist: - print 'missing campaign:'+ campaign_id + print('missing campaign:'+ campaign_id) continue - if royalties_due.has_key(campaign.id): + if campaign.id in royalties_due: royalty_due = royalties_due[campaign.id].royalty_due else: royalty_due = D(0) @@ -43,4 +42,4 @@ campaign.royalty_due=royalty_due royalties_due[campaign.id]=campaign for campaign in royalties_due.values(): - print campaign.rightsholder + ' '+campaign.name+' '+str(campaign.royalty_due) \ No newline at end of file + print(campaign.rightsholder + ' '+campaign.name+' '+str(campaign.royalty_due)) \ No newline at end of file diff --git a/payment/stripelib.py b/payment/stripelib.py index 072b3d7e6..064bd71f1 100644 --- a/payment/stripelib.py +++ b/payment/stripelib.py @@ -1,32 +1,28 @@ # https://github.com/stripe/stripe-python # https://stripe.com/docs/api?lang=python#top -""" -external library imports -""" + import logging import json - -from datetime import datetime, timedelta -from itertools import islice -from pytz import utc import re import unittest -from unittest import TestCase - + +from datetime import timedelta +from unittest import TestCase + import stripe -""" -django imports -""" + +#django imports + from django.conf import settings from django.core.mail import send_mail from django.http import HttpResponse from django.utils.timezone import now -""" -regluit imports -""" + +#regluit imports + from regluit.payment import baseprocessor from regluit.payment.models import Account, Transaction, PaymentResponse from regluit.payment.parameters import ( @@ -40,16 +36,14 @@ ) from regluit.payment.signals 
import transaction_charged, transaction_failed -# as of 2013.07.15 -# ['charge.disputed', 'coupon.updated'] are legacy events -- don't know whether to -# include them in list -STRIPE_EVENT_TYPES = ['account.updated', 'account.application.deauthorized', 'balance.available', +STRIPE_EVENT_TYPES = [ + 'account.updated', 'account.application.deauthorized', 'balance.available', 'charge.succeeded', 'charge.failed', 'charge.refunded', 'charge.captured', 'charge.dispute.created', 'charge.dispute.updated', 'charge.dispute.closed', 'customer.created', 'customer.updated', 'customer.deleted', 'customer.card.created', 'customer.card.updated', 'customer.card.deleted', - 'customer.source.created', 'customer.source.deleted', 'customer.source.expiring', + 'customer.source.created', 'customer.source.deleted', 'customer.source.expiring', 'customer.source.updated', 'customer.subscription.created', 'customer.subscription.updated', 'customer.subscription.deleted', 'customer.subscription.trial_will_end', @@ -58,50 +52,33 @@ 'invoice.payment_succeeded', 'invoice.payment_failed', 'invoiceitem.created', 'invoiceitem.updated', 'invoiceitem.deleted', 'plan.created', 'plan.updated', 'plan.deleted', 'coupon.created', 'coupon.deleted', 'transfer.created', - 'transfer.updated', 'transfer.paid', 'transfer.failed', 'ping'] + 'transfer.updated', 'transfer.paid', 'transfer.failed', 'payment_method.attached', + 'payment_method.card_automatically_updated', 'ping', +] logger = logging.getLogger(__name__) -# https://stackoverflow.com/questions/2348317/how-to-write-a-pager-for-python-iterators/2350904#2350904 +# https://stackoverflow.com/questions/2348317/how-to-write-a-pager-for-python-iterators/2350904#2350904 def grouper(iterable, page_size): - page= [] + page = [] for item in iterable: - page.append( item ) + page.append(item) if len(page) == page_size: yield page - page= [] - if len(page): + page = [] + if len(page) > 0: yield page class StripelibError(baseprocessor.ProcessorError): pass -# if 
customer.id doesn't exist, create one and then charge the customer -# we probably should ask our users whether they are ok with our creating a customer id account -- or ask for credit -# card info each time.... - -# should load the keys for Stripe from db -- but for now just hardcode here -# moving towards not having the stripe api key for the non profit partner in the unglue.it code -- but in a logically -# distinct application - -TEST_STRIPE_PK = 'pk_0EajXPn195ZdF7Gt7pCxsqRhNN5BF' -TEST_STRIPE_SK = 'sk_0EajIO4Dnh646KPIgLWGcO10f9qnH' - -try: - from regluit.core.models import Key - STRIPE_PK = Key.objects.get(name="STRIPE_PK").value - STRIPE_SK = Key.objects.get(name="STRIPE_SK").value - logger.info('Successful loading of STRIPE_*_KEYs') -except Exception, e: - # currently test keys for Gluejar and for raymond.yee@gmail.com as standin for non-profit - logger.info('Exception {0} Need to use TEST STRIPE_*_KEYs'.format(e)) - STRIPE_PK = TEST_STRIPE_PK - STRIPE_SK = TEST_STRIPE_SK - +STRIPE_PK = settings.STRIPE_PK +STRIPE_SK = settings.STRIPE_SK + # set default stripe api_key to that of unglue.it -stripe.api_key = STRIPE_SK +stripe.api_key = STRIPE_SK # maybe we should be able to set this in django.settings... 
@@ -115,8 +92,8 @@ class StripelibError(baseprocessor.ProcessorError): # https://stripe.com/docs/testing TEST_CARDS = ( - ('4242424242424242', 'Visa'), - ('4012888888881881', 'Visa'), + ('4242424242424242', 'Visa'), + ('4012888888881881', 'Visa'), ('5555555555554444', 'MasterCard'), ('5105105105105100', 'MasterCard'), ('378282246310005', 'American Express'), @@ -126,19 +103,23 @@ class StripelibError(baseprocessor.ProcessorError): ('30569309025904', "Diner's Club"), ('38520000023237', "Diner's Club"), ('3530111333300000', 'JCB'), - ('3566002020360505','JCB') + ('3566002020360505', 'JCB') ) ERROR_TESTING = dict(( - ('ADDRESS1_ZIP_FAIL', ('4000000000000010', 'address_line1_check and address_zip_check will both fail')), + ('ADDRESS1_ZIP_FAIL', + ('4000000000000010', 'address_line1_check and address_zip_check will both fail')), ('ADDRESS1_FAIL', ('4000000000000028', 'address_line1_check will fail.')), ('ADDRESS_ZIP_FAIL', ('4000000000000036', 'address_zip_check will fail.')), ('CVC_CHECK_FAIL', ('4000000000000101', 'cvc_check will fail.')), - ('BAD_ATTACHED_CARD', ('4000000000000341', 'Attaching this card to a Customer object will succeed, but attempts to charge the customer will fail.')), + ('BAD_ATTACHED_CARD', + ('4000000000000341', + 'Attaching this card to a Customer object will succeed, but attempts to charge the customer will fail.')), ('CHARGE_DECLINE', ('4000000000000002', 'Charges with this card will always be declined.')) )) -CARD_FIELDS_TO_COMPARE = ('exp_month', 'exp_year', 'name', 'address_line1', 'address_line2', 'address_zip', 'address_state') +CARD_FIELDS_TO_COMPARE = ('exp_month', 'exp_year', 'name', 'address_line1', 'address_line2', + 'address_zip', 'address_state') # types of errors / when they can be handled @@ -150,9 +131,10 @@ class StripelibError(baseprocessor.ProcessorError): def filter_none(d): - return dict([(k,v) for (k,v) in d.items() if v is not None]) - -# if you create a Customer object, then you'll be able to charge multiple times. 
You can create a customer with a token. + return dict([(k, v) for (k, v) in d.items() if v is not None]) + +# if you create a Customer object, then you'll be able to charge multiple times. +# You can create a customer with a token. # https://en.wikipedia.org/wiki/Luhn_algorithm#Implementation_of_standard_Mod_10 @@ -167,18 +149,19 @@ def digits_of(n): for d in even_digits: checksum += sum(digits_of(d*2)) return checksum % 10 - + def is_luhn_valid(card_number): return luhn_checksum(card_number) == 0 - - + + # https://stripe.com/docs/tutorials/charges -def card (number=TEST_CARDS[0][0], exp_month=1, exp_year=2020, cvc=None, name=None, - address_line1=None, address_line2=None, address_zip=None, address_state=None, address_country=None): - +def card(number=TEST_CARDS[0][0], exp_month=1, exp_year=2030, cvc=None, name=None, + address_line1=None, address_line2=None, address_zip=None, address_state=None, + address_country=None): + """Note: there is no place to enter address_city in the API""" - + card = { "number": number, "exp_month": int(exp_month), @@ -191,13 +174,13 @@ def card (number=TEST_CARDS[0][0], exp_month=1, exp_year=2020, cvc=None, name=No "address_state": address_state, "address_country": address_country } - + return filter_none(card) def _isListableAPIResource(x): """test whether x is an instance of the stripe.ListableAPIResource class""" try: - return issubclass(x, stripe.ListableAPIResource) + return issubclass(x, stripe.abstract.ListableAPIResource) except: return False @@ -205,10 +188,10 @@ def _isListableAPIResource(x): class StripeClient(object): def __init__(self, api_key=STRIPE_SK): self.api_key = api_key - + # key entities: Charge, Customer, Token, Event - - @property + + @property def charge(self): return stripe.Charge(api_key=self.api_key) @@ -219,22 +202,23 @@ def customer(self): @property def token(self): return stripe.Token(api_key=self.api_key) - + @property def transfer(self): return stripe.Transfer(api_key=self.api_key) - - @property + + 
@property def event(self): return stripe.Event(api_key=self.api_key) - + def create_token(self, card): return stripe.Token(api_key=self.api_key).create(card=card) - def create_customer(self, card=None, description=None, email=None, account_balance=None, plan=None, trial_end=None): + def create_customer(self, card=None, description=None, email=None, account_balance=None, + plan=None, trial_end=None): """card is a dictionary or a token""" # https://stripe.com/docs/api?lang=python#create_customer - + customer = stripe.Customer(api_key=self.api_key).create( card=card, description=description, @@ -243,17 +227,17 @@ def create_customer(self, card=None, description=None, email=None, account_balan plan=plan, trial_end=trial_end ) - + # customer.id is useful to save in db return customer - def create_charge(self, amount, currency="usd", customer=None, card=None, description=None ): + def create_charge(self, amount, currency="usd", customer=None, card=None, description=None): # https://stripe.com/docs/api?lang=python#create_charge # customer.id or card required but not both # charge the Customer instead of the card # amount in cents - + charge = stripe.Charge(api_key=self.api_key).create( amount=int(100*amount), # in cents currency=currency, @@ -261,20 +245,20 @@ def create_charge(self, amount, currency="usd", customer=None, card=None, descri card=card, description=description ) - + return charge def refund_charge(self, charge_id): # https://stripe.com/docs/api?lang=python#refund_charge ch = stripe.Charge(api_key=self.api_key).retrieve(charge_id) ch.refund() - return ch - + return ch + def _all_objs(self, class_type, **kwargs): """a generic iterator for all classes of type stripe.ListableAPIResource""" # type=None, created=None, count=None, offset=0 - # obj_type: one of 'Charge','Coupon','Customer', 'Event','Invoice', 'InvoiceItem', 'Plan', 'Transfer' - + # obj_type: one of 'Charge','Coupon','Customer', 'Event','Invoice', + # 'InvoiceItem', 'Plan', 'Transfer' try: 
stripe_class = getattr(stripe, class_type) except: @@ -283,63 +267,63 @@ def _all_objs(self, class_type, **kwargs): if _isListableAPIResource(stripe_class): kwargs2 = kwargs.copy() kwargs2.setdefault('offset', 0) - kwargs2.setdefault('count', 100) - + kwargs2.setdefault('count', 100) + more_items = True while more_items: - - items = stripe_class(api_key=self.api_key).all(**kwargs2)['data'] + + items = stripe_class(api_key=self.api_key).list(**kwargs2)['data'] for item in items: yield item - if len(items): + if len(items) > 0: kwargs2['offset'] += len(items) else: more_items = False else: yield StopIteration - + def __getattribute__(self, name): """ handle list_* calls""" mapping = {'list_charges':"Charge", 'list_coupons': "Coupon", - 'list_customers':"Customer", + 'list_customers':"Customer", 'list_events':"Event", 'list_invoices':"Invoice", 'list_invoiceitems':"InvoiceItem", 'list_plans':"Plan", - 'list_transfers':"Transfer" - } + 'list_transfers':"Transfer"} if name in mapping.keys(): class_value = mapping[name] def list_events(**kwargs): for e in self._all_objs(class_value, **kwargs): - yield e - return list_events - else: - return object.__getattribute__(self, name) - - + yield e + return list_events + return object.__getattribute__(self, name) + + + - # can't test Transfer in test mode: "There are no transfers in test mode." -#pledge scenario +# pledge scenario # bad card -- what types of erros to handle? 
# https://stripe.com/docs/api#errors # https://stripe.com/docs/api#event_types -# events of interest -- especially ones that do not directly arise immediately (synchronously) from something we do -- I think -# especially: charge.disputed -# I think following (charge.succeeded, charge.failed, charge.refunded) pretty much sychronous to our actions -# customer.created, customer.updated, customer.deleted +# events of interest -- especially ones that do not directly arise immediately (synchronously) +# from something we do -- I think especially: charge.disputed +# I think following (charge.succeeded, charge.failed, charge.refunded) pretty much sychronous +# to our actions customer.created, customer.updated, customer.deleted # transfer -# I expect the ones related to transfers all happen asynchronously: transfer.created, transfer.updated, transfer.failed +# I expect the ones related to transfers all happen asynchronously: +# transfer.created, transfer.updated, transfer.failed # When will the money I charge with Stripe end up in my bank account? -# Every day, we transfer the money that you charged seven days previously?that is, you receive the money for your March 1st charges on March 8th. +# Every day, we transfer the money that you charged seven days previously? that is, +# you receive the money for your March 1st charges on March 8th. # pending payments? # how to tell whether money transferred to bank account yet @@ -348,7 +332,7 @@ def list_events(**kwargs): # Errors we still need to catch: # -# * invalid_number -- can't get stripe to generate for us. What it means: +# * invalid_number -- can't get stripe to generate for us. 
What it means: # # * that the card has been cancelled (or never existed to begin with # @@ -356,13 +340,14 @@ def list_events(**kwargs): # # * the first 6 digits point to a valid bank # -# * but the account number (the rest of the digits) doesn't correspond to a credit account with that bank -# +# * but the account number (the rest of the digits) doesn't correspond to a credit account +# with that bank # * Brian of stripe.com suggests we could treat it the same way as we'd treat card_declined # # * processing_error: # -# * means: something went wrong when stripe tried to make the charge (it could be that the card's issuing bank is down, or our connection to the bank isn't working properly) +# * means: something went wrong when stripe tried to make the charge (it could be that the +# card's issuing bank is down, or our connection to the bank isn't working properly) # * we can retry -- e.g., a minute later, then 30 minutes, then an hour, 3 hours, a day. # * we shouldn't see processing_error very often # @@ -372,23 +357,25 @@ def list_events(**kwargs): class StripeErrorTest(TestCase): """Make sure the exceptions returned by stripe act as expected""" - + def test_cc_test_numbers_luhn_valid(self): - """Show that the test CC numbers supplied for testing as valid numbers are indeed Luhn valid""" + """Show that the test CC numbers supplied for testing as valid + numbers are indeed Luhn valid""" self.assertTrue(all([is_luhn_valid(c[0]) for c in ERROR_TESTING.values()])) - + def test_good_token(self): """ verify normal operation """ sc = StripeClient() - card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2020', cvc='123', name='Don Giovanni', - address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) # good card + card1 = card(number=TEST_CARDS[0][0], exp_month=1, exp_year='2020', cvc='123', + name='Don Giovanni', address_line1="100 Jackson St.", address_line2="", + address_zip="94706", address_state="CA", 
address_country=None) # good card token1 = sc.create_token(card=card1) # use the token id -- which is what we get from JavaScript api -- and retrieve the token token2 = sc.token.retrieve(id=token1.id) self.assertEqual(token2.id, token1.id) # make sure token id has a form tok_ self.assertEqual(token2.id[:4], "tok_") - + # should be only test mode self.assertEqual(token2.livemode, False) # token hasn't been used yet @@ -397,21 +384,21 @@ def test_good_token(self): for k in CARD_FIELDS_TO_COMPARE: self.assertEqual(token2.card[k], card1[k]) # last4 - self.assertEqual(token2.card.last4, TEST_CARDS[0][0][-4:]) + self.assertEqual(token2.card.last4, TEST_CARDS[0][0][-4:]) # fingerprint self.assertGreaterEqual(len(token2.card.fingerprint), 16) - + # now charge the token charge1 = sc.create_charge(10, 'usd', card=token2.id) self.assertEqual(charge1.amount, 1000) self.assertEqual(charge1.id[:3], "ch_") # dispute, failure_message, fee, fee_details - self.assertEqual(charge1.dispute,None) - self.assertEqual(charge1.failure_message,None) - self.assertEqual(charge1.fee,59) - self.assertEqual(charge1.refunded,False) - - + self.assertEqual(charge1.dispute, None) + self.assertEqual(charge1.failure_message, None) + self.assertEqual(charge1.fee, 59) + self.assertEqual(charge1.refunded, False) + + def test_error_creating_customer_with_declined_card(self): """Test whether we can get a charge decline error""" sc = StripeClient() @@ -419,513 +406,544 @@ def test_error_creating_customer_with_declined_card(self): try: cust1 = sc.create_customer(card=card1, description="This card should fail") self.fail("Attempt to create customer did not throw expected exception.") - except stripe.CardError as e: + except stripe.error.CardError as e: self.assertEqual(e.code, "card_declined") - self.assertEqual(e.message, "Your card was declined") - + self.assertEqual(e.args[0], "Your card was declined") + def test_charge_bad_cust(self): # expect the card to be declined -- and for us to get CardError sc = 
StripeClient() # bad card card1 = card(number=ERROR_TESTING['BAD_ATTACHED_CARD'][0]) # attaching card should be ok - cust1 = sc.create_customer(card=card1, description="test bad customer", email="rdhyee@gluejar.com") + cust1 = sc.create_customer(card=card1, description="test bad customer", + email="rdhyee@gluejar.com") # trying to charge the card should fail - self.assertRaises(stripe.CardError, sc.create_charge, 10, - customer = cust1.id, description="$10 for bad cust") + self.assertRaises(stripe.error.CardError, sc.create_charge, 10, + customer=cust1.id, description="$10 for bad cust") def test_bad_cc_number(self): """send a bad cc and should get an error when trying to create a token""" BAD_CC_NUM = '4242424242424241' - + # reason for decline is number is not Luhn valid self.assertFalse(is_luhn_valid(BAD_CC_NUM)) - + sc = StripeClient() card1 = card(number=BAD_CC_NUM, exp_month=1, exp_year=2020, cvc='123', name='Don Giovanni', - address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) # good card - + address_line1="100 Jackson St.", address_line2="", address_zip="94706", + address_state="CA", address_country=None) # good card + try: token1 = sc.create_token(card=card1) self.fail("Attempt to create token with bad cc number did not throw expected exception.") - except stripe.CardError as e: + except stripe.error.CardError as e: self.assertEqual(e.code, "incorrect_number") - self.assertEqual(e.message, "Your card number is incorrect") - + self.assertEqual(e.args[0], "Your card number is incorrect") + def test_invalid_expiry_month(self): """Use an invalid month e.g. 
13.""" - + sc = StripeClient() - card1 = card(number=TEST_CARDS[0][0], exp_month=13, exp_year=2020, cvc='123', name='Don Giovanni', - address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) + card1 = card(number=TEST_CARDS[0][0], exp_month=13, exp_year=2020, cvc='123', + name='Don Giovanni', address_line1="100 Jackson St.", address_line2="", + address_zip="94706", address_state="CA", address_country=None) try: token1 = sc.create_token(card=card1) self.fail("Attempt to create token with invalid expiry month did not throw expected exception.") - except stripe.CardError as e: + except stripe.error.CardError as e: self.assertEqual(e.code, "invalid_expiry_month") - self.assertEqual(e.message, "Your card's expiration month is invalid") + self.assertEqual(e.args[0], "Your card's expiration month is invalid") def test_invalid_expiry_year(self): """Use a year in the past e.g. 1970.""" - + sc = StripeClient() - card1 = card(number=TEST_CARDS[0][0], exp_month=12, exp_year=1970, cvc='123', name='Don Giovanni', - address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) + card1 = card(number=TEST_CARDS[0][0], exp_month=12, exp_year=1970, cvc='123', + name='Don Giovanni', address_line1="100 Jackson St.", address_line2="", + address_zip="94706", address_state="CA", address_country=None) try: token1 = sc.create_token(card=card1) self.fail("Attempt to create token with invalid expiry year did not throw expected exception.") - except stripe.CardError as e: + except stripe.error.CardError as e: self.assertEqual(e.code, "invalid_expiry_year") - self.assertEqual(e.message, "Your card's expiration year is invalid") - + self.assertEqual(e.args[0], "Your card's expiration year is invalid") + def test_invalid_cvc(self): """Use a two digit number e.g. 
99.""" - + sc = StripeClient() - card1 = card(number=TEST_CARDS[0][0], exp_month=12, exp_year=2020, cvc='99', name='Don Giovanni', - address_line1="100 Jackson St.", address_line2="", address_zip="94706", address_state="CA", address_country=None) + card1 = card(number=TEST_CARDS[0][0], exp_month=12, exp_year=2020, cvc='99', + name='Don Giovanni', address_line1="100 Jackson St.", address_line2="", + address_zip="94706", address_state="CA", address_country=None) try: token1 = sc.create_token(card=card1) self.fail("Attempt to create token with invalid cvc did not throw expected exception.") - except stripe.CardError as e: + except stripe.error.CardError as e: self.assertEqual(e.code, "invalid_cvc") - self.assertEqual(e.message, "Your card's security code is invalid") - + self.assertEqual(e.args[0], "Your card's security code is invalid") + def test_missing_card(self): """There is no card on a customer that is being charged""" - + sc = StripeClient() # create a Customer with no attached card cust1 = sc.create_customer(description="test cust w/ no card") try: - sc.create_charge(10, customer = cust1.id, description="$10 for cust w/ no card") - except stripe.CardError as e: + sc.create_charge(10, customer=cust1.id, description="$10 for cust w/ no card") + except stripe.error.CardError as e: self.assertEqual(e.code, "missing") - self.assertEqual(e.message, "Cannot charge a customer that has no active card") - + self.assertEqual(e.args[0], "Cannot charge a customer that has no active card") + class PledgeScenarioTest(TestCase): @classmethod def setUpClass(cls): cls._sc = StripeClient(api_key=STRIPE_SK) - + # valid card card0 = card() - cls._good_cust = cls._sc.create_customer(card=card0, description="test good customer", email="raymond.yee@gmail.com") - + cls._good_cust = cls._sc.create_customer(card=card0, description="test good customer", + email="raymond.yee@gmail.com") + # bad card test_card_num_to_get_BAD_ATTACHED_CARD = ERROR_TESTING['BAD_ATTACHED_CARD'][0] card1 = 
card(number=test_card_num_to_get_BAD_ATTACHED_CARD) - cls._cust_bad_card = cls._sc.create_customer(card=card1, description="test bad customer", email="rdhyee@gluejar.com") - + cls._cust_bad_card = cls._sc.create_customer(card=card1, description="test bad customer", + email="rdhyee@gluejar.com") + def test_charge_good_cust(self): - charge = self._sc.create_charge(10, customer=self._good_cust.id, description="$10 for good cust") - self.assertEqual(type(charge.id), unicode) + charge = self._sc.create_charge(10, customer=self._good_cust.id, + description="$10 for good cust") + self.assertEqual(type(charge.id), str) # print out all the pieces of Customer and Charge objects - print dir(charge) - print dir(self._good_cust) - + print(dir(charge)) + print(dir(self._good_cust)) + def test_error_creating_customer_with_declined_card(self): # should get a CardError upon attempt to create Customer with this card _card = card(number=card(ERROR_TESTING['CHARGE_DECLINE'][0])) - self.assertRaises(stripe.CardError, self._sc.create_customer, card=_card) - + self.assertRaises(stripe.error.CardError, self._sc.create_customer, card=_card) + def test_charge_bad_cust(self): # expect the card to be declined -- and for us to get CardError - self.assertRaises(stripe.CardError, self._sc.create_charge, 10, - customer = self._cust_bad_card.id, description="$10 for bad cust") - + self.assertRaises(stripe.error.CardError, self._sc.create_charge, 10, + customer=self._cust_bad_card.id, description="$10 for bad cust") + @classmethod def tearDownClass(cls): # clean up stuff we create in test -- right now list current objects pass - + #cls._good_cust.delete() - - #print "list of customers" - #print [(i, c.id, c.description, c.email, datetime.fromtimestamp(c.created, tz=utc), c.account_balance, c.delinquent, c.active_card.fingerprint, c.active_card.type, c.active_card.last4, c.active_card.exp_month, c.active_card.exp_year, c.active_card.country) for(i, c) in enumerate(cls._sc.customer.all()["data"])] - 
# - #print "list of charges" - #print [(i, c.id, c.amount, c.amount_refunded, c.currency, c.description, datetime.fromtimestamp(c.created, tz=utc), c.paid, c.fee, c.disputed, c.amount_refunded, c.failure_message, c.card.fingerprint, c.card.type, c.card.last4, c.card.exp_month, c.card.exp_year) for (i, c) in enumerate(cls._sc.charge.all()['data'])] - # - ## can retrieve events since a certain time? - #print "list of events", cls._sc.event.all() - #print [(i, e.id, e.type, e.created, e.pending_webhooks, e.data) for (i,e) in enumerate(cls._sc.event.all()['data'])] class StripePaymentRequest(baseprocessor.BasePaymentRequest): """so far there is no need to have a separate class here""" pass class Processor(baseprocessor.Processor): - + def make_account(self, user=None, token=None, email=None): """returns a payment.models.Account based on stripe token and user""" - + if token is None or len(token) == 0: - raise StripelibError("input token is None", None) + raise StripelibError("input token is None", None) sc = StripeClient() - + # create customer and charge id and then charge the customer try: if user: customer = sc.create_customer(card=token, description=user.username, - email=user.email) + email=user.email) else: customer = sc.create_customer(card=token, description='anonymous user', email=email) - except stripe.StripeError as e: - raise StripelibError(e.message, e) - - account = Account(host = PAYMENT_HOST_STRIPE, - account_id = customer.id, - card_last4 = customer.active_card.last4, - card_type = customer.active_card.type, - card_exp_month = customer.active_card.exp_month, - card_exp_year = customer.active_card.exp_year, - card_fingerprint = customer.active_card.fingerprint, - card_country = customer.active_card.country, - user = user + except stripe.error.StripeError as e: + raise StripelibError(e.args, e) + + account = Account(host=PAYMENT_HOST_STRIPE, + account_id=customer.id, + card_last4=customer.active_card.last4, + card_type=customer.active_card.type, + 
card_exp_month=customer.active_card.exp_month, + card_exp_year=customer.active_card.exp_year, + card_fingerprint=customer.active_card.fingerprint, + card_country=customer.active_card.country, + user=user ) if user and user.profile.account: user.profile.account.deactivate() - account.save() - account.recharge_failed_transactions() + account.save() + account.recharge_failed_transactions() else: account.save() return account - + class Preapproval(StripePaymentRequest, baseprocessor.Processor.Preapproval): - - def __init__( self, transaction, amount, expiry=None, return_url=None, paymentReason=""): - - # set the expiration date for the preapproval if not passed in. This is what the paypal library does - + + def __init__(self, transaction, amount, expiry=None, return_url=None, paymentReason=""): + + # set the expiration date for the preapproval if not passed in. + # This is what the paypal library does + self.transaction = transaction - + now_val = now() if expiry is None: - expiry = now_val + timedelta( days=settings.PREAPPROVAL_PERIOD ) + expiry = now_val + timedelta(days=settings.PREAPPROVAL_PERIOD) transaction.date_authorized = now_val transaction.date_expired = expiry - + # let's figure out what part of transaction can be used to store info # try placing charge id in transaction.pay_key # need to set amount - # how does transaction.max_amount get set? -- coming from /pledge/xxx/ -> manager.process_transaction + # how does transaction.max_amount get set? 
+ # -- coming from /pledge/xxx/ -> manager.process_transaction # max_amount is set -- but I don't think we need it for stripe - + # ASSUMPTION: a user has any given moment one and only one active payment Account - + account = transaction.user.profile.account if not account: - logger.warning("user {0} has no active payment account".format(transaction.user)) - raise StripelibError("user {0} has no active payment account".format(transaction.user)) - - logger.info("user: {0} customer.id is {1}".format(transaction.user, account.account_id)) - + logger.warning("user %s has no active payment account", transaction.user) + raise StripelibError("user %s has no active payment account" % transaction.user) + + logger.info("user: %s customer.id is %s", transaction.user, account.account_id) + # settings to apply to transaction for TRANSACTION_STATUS_ACTIVE # should approved be set to False and wait for a webhook? transaction.approved = True transaction.type = PAYMENT_TYPE_AUTHORIZATION transaction.host = PAYMENT_HOST_STRIPE transaction.status = TRANSACTION_STATUS_ACTIVE - + transaction.preapproval_key = account.account_id - + transaction.currency = 'USD' transaction.amount = amount - + transaction.save() - + def key(self): return self.transaction.preapproval_key - + def next_url(self): """return None because no redirection to stripe is required""" return None - + class Pay(StripePaymentRequest, baseprocessor.Processor.Pay): - - ''' + ''' The pay function generates a redirect URL to approve the transaction If the transaction has a null user (is_anonymous), then a token musr be supplied - ''' - - def __init__( self, transaction, return_url=None, amount=None, paymentReason="", token=None): - self.transaction=transaction - self.url = return_url - - now_val = now() - transaction.date_authorized = now_val - - # ASSUMPTION: a user has any given moment one and only one active payment Account - if token: - # user is anonymous - account = transaction.get_payment_class().make_account(token 
= token, email = transaction.receipt) - else: - account = transaction.user.profile.account - - if not account: - logger.warning("user {0} has no active payment account".format(transaction.user)) - raise StripelibError("user {0} has no active payment account".format(transaction.user)) - - logger.info("user: {0} customer.id is {1}".format(transaction.user, account.account_id)) - - # settings to apply to transaction for TRANSACTION_STATUS_ACTIVE - # should approved be set to False and wait for a webhook? - transaction.approved = True - transaction.type = PAYMENT_TYPE_INSTANT - transaction.host = PAYMENT_HOST_STRIPE - - transaction.preapproval_key = account.account_id - - transaction.currency = 'USD' - transaction.amount = amount - - transaction.save() - - # execute the transaction - p = transaction.get_payment_class().Execute(transaction) - - if p.success() and not p.error(): - transaction.pay_key = p.key() + ''' + + def __init__(self, transaction, return_url=None, amount=None, paymentReason="", token=None): + self.transaction = transaction + self.url = return_url + + now_val = now() + transaction.date_authorized = now_val + + # ASSUMPTION: a user has any given moment one and only one active payment Account + if token: + # user is anonymous + try: + account = transaction.get_payment_class().make_account( + token=token, email=transaction.receipt) + except StripelibError as e: + self.errorMessage = str(e) + return + else: + account = transaction.user.profile.account + + if not account: + logger.warning("user %s has no active payment account", transaction.user) + raise StripelibError("user %s has no active payment account" % transaction.user) + + logger.info("user: %s customer.id is %s", transaction.user, account.account_id) + + # settings to apply to transaction for TRANSACTION_STATUS_ACTIVE + # should approved be set to False and wait for a webhook? 
+ transaction.approved = True + transaction.type = PAYMENT_TYPE_INSTANT + transaction.host = PAYMENT_HOST_STRIPE + + transaction.preapproval_key = account.account_id + + transaction.currency = 'USD' + transaction.amount = amount + transaction.save() - else: - self.errorMessage = p.errorMessage #pass error message up - logger.info("execute_transaction Error: " + p.error_string()) - - def amount( self ): - return self.transaction.amount - - def key( self ): - return self.transaction.pay_key - - def next_url( self ): - return self.url - + + # execute the transaction + p = transaction.get_payment_class().Execute(transaction) + + if p.success() and not p.error(): + transaction.pay_key = p.key() + transaction.save() + else: + self.errorMessage = p.errorMessage #pass error message up + logger.info("execute_transaction Error: %s", p.error_string()) + + def amount(self): + return self.transaction.amount + + def key(self): + return self.transaction.pay_key + + def next_url(self): + return self.url + class Execute(StripePaymentRequest): - + ''' - The Execute function attempts to charge the credit card of stripe Customer associated with user connected to transaction. + The Execute function attempts to charge the credit card of stripe Customer associated + with user connected to transaction. 
''' - + def __init__(self, transaction=None): - + self.transaction = transaction - + # make sure transaction hasn't already been executed if transaction.status == TRANSACTION_STATUS_COMPLETE: return # make sure we are dealing with a stripe transaction - if transaction.host <> PAYMENT_HOST_STRIPE: - raise StripelibError("transaction.host {0} is not the expected {1}".format(transaction.host, PAYMENT_HOST_STRIPE)) - + if transaction.host != PAYMENT_HOST_STRIPE: + raise StripelibError( + "transaction.host %s is not the expected %s".format( + transaction.host, + PAYMENT_HOST_STRIPE) + ) + sc = StripeClient() - + # look first for transaction.user.profile.account.account_id try: customer_id = transaction.user.profile.account.account_id except: customer_id = transaction.preapproval_key - - if customer_id is not None: + + if customer_id is not None: try: - # useful things to put in description: transaction.id, transaction.user.id, customer_id, transaction.amount - charge = sc.create_charge(transaction.amount, customer=customer_id, - description=json.dumps({"t.id":transaction.id, - "email":transaction.user.email if transaction.user else transaction.receipt, - "cus.id":customer_id, - "tc.id": transaction.campaign.id if transaction.campaign else '0', - "amount": float(transaction.amount)})) - except stripe.StripeError as e: + # useful things to put in description: transaction.id, transaction.user.id, + # customer_id, transaction.amount + charge = sc.create_charge( + transaction.amount, customer=customer_id, + description=json.dumps( + {"t.id": transaction.id, + "email": transaction.user.email if transaction.user else transaction.receipt, + "cus.id":customer_id, + "tc.id": transaction.campaign.id if transaction.campaign else '0', + "amount": float(transaction.amount)} + ) + ) + except stripe.error.StripeError as e: # what to record in terms of errors? (error log?) 
# use PaymentResponse to store error r = PaymentResponse.objects.create(api="stripelib.Execute", correlation_id=None, - timestamp=now(), info=e.message, - status=TRANSACTION_STATUS_ERROR, transaction=transaction) - - transaction.status = TRANSACTION_STATUS_ERROR - self.errorMessage = e.message # manager puts this on transaction + timestamp=now(), info=e.args[0], + status=TRANSACTION_STATUS_ERROR, + transaction=transaction) + + transaction.status = TRANSACTION_STATUS_ERROR + self.errorMessage = e.args # manager puts this on transaction transaction.save() - # fire off the fact that transaction failed -- should actually do so only if not a transient error + # fire off the fact that transaction failed -- should actually do so + # only if not a transient error # if card_declined or expired card, ask user to update account - if isinstance(e, stripe.CardError) and e.code in ('card_declined', 'expired_card', 'incorrect_number', 'processing_error'): + if isinstance(e, stripe.error.CardError) and e.code in ( + 'card_declined', 'expired_card', 'incorrect_number', + 'processing_error'): transaction_failed.send(sender=self, transaction=transaction) # otherwise, report exception to us else: - logger.exception("transaction id {0}, exception: {1}".format(transaction.id, e.message)) - - # raise StripelibError(e.message, e) - + logger.exception("transaction id %s, exception: %s", transaction.id, e.args) + else: self.charge = charge - + transaction.status = TRANSACTION_STATUS_COMPLETE transaction.pay_key = charge.id transaction.date_payment = now() transaction.save() - + # fire signal for sucessful transaction transaction_charged.send(sender=self, transaction=transaction) - + else: # nothing to charge - raise StripelibError("No customer id available to charge for transaction {0}".format(transaction.id), None) - - + raise StripelibError( + "No customer id available to charge for transaction {0}".format(transaction.id), + None) + + def api(self): return "Base Pay" - + def key(self): - 
# IN paypal land, our key is updated from a preapproval to a pay key here, just return the existing key + # IN paypal land, our key is updated from a preapproval to a pay key here, + # just return the existing key return self.transaction.pay_key - + class PreapprovalDetails(StripePaymentRequest): ''' Get details about an authorized token - + This api must set 4 different class variables to work with the code in manager.py - + status - one of the global transaction status codes approved - boolean value currency - not used in this API, but we can get some more info via other APIs - TODO amount - not used in this API, but we can get some more info via other APIs - TODO - + ''' def __init__(self, transaction): - + self.transaction = transaction self.status = self.transaction.status if self.status == TRANSACTION_STATUS_CANCELED: self.approved = False else: self.approved = True - - # Set the other fields that are expected. We don't have values for these now, so just copy the transaction + + # Set the other fields that are expected. We don't have values for these now, + # so just copy the transaction self.currency = transaction.currency self.amount = transaction.amount - + def ProcessIPN(self, request): # retrieve the request's body and parse it as JSON in, e.g. 
Django try: event_json = json.loads(request.body) - except ValueError, e: + except ValueError as e: # not able to parse request.body -- throw a "Bad Request" error - logger.warning("Non-json being sent to Stripe IPN: {0}".format(e)) + logger.warning("Non-json being sent to Stripe IPN: %s", e) return HttpResponse(status=400) else: # now parse out pieces of the webhook event_id = event_json.get("id") # use Stripe to ask for details -- ignore what we're sent for security - + sc = StripeClient() try: event = sc.event.retrieve(event_id) - except stripe.InvalidRequestError: - logger.warning("Invalid Event ID: {0}".format(event_id)) + except stripe.error.InvalidRequestError: + logger.warning("Invalid Event ID: %s", event_id) return HttpResponse(status=400) else: event_type = event.get("type") if event_type not in STRIPE_EVENT_TYPES: - logger.warning("Unrecognized Stripe event type {0} for event {1}".format(event_type, event_id)) + logger.warning("Unrecognized Stripe event type %s for event %s", + event_type, event_id) # is this the right code to respond with? return HttpResponse(status=400) # https://stripe.com/docs/api?lang=python#event_types -- type to delegate things # parse out type as resource.action try: - (resource, action) = re.match("^(.+)\.([^\.]*)$", event_type).groups() - except Exception, e: - logger.warning("Parsing of event_type into resource, action failed: {0}".format(e)) + (resource, action) = re.match(r"^(.+)\.([^\.]*)$", event_type).groups() + except Exception as e: + logger.warning("Parsing of event_type into resource, action failed: %s", e) return HttpResponse(status=400) - + try: ev_object = event.data.object - except Exception, e: - logger.warning("attempt to retrieve event object failed: {0}".format(e)) + except Exception as e: + logger.warning("attempt to retrieve event object failed: %s", e) return HttpResponse(status=400) - + if event_type == 'account.updated': # should we alert ourselves? # how about account.application.deauthorized ? 
pass elif resource == 'charge': # we need to handle: succeeded, failed, refunded, disputed - + if action == 'succeeded': - # TO DO: delete this logic since we don't do anything but look up transaction. - logger.info("charge.succeeded webhook for {0}".format(ev_object.get("id"))) + # TO DO: delete this logic since we don't do anything + # but look up transaction. + logger.info("charge.succeeded webhook for %s", ev_object.get("id")) # try to parse description of object to pull related transaction if any - # wrapping this in a try statement because it possible that we have a charge.succeeded outside context of unglue.it + # wrapping this in a try statement because it possible that we have a + # charge.succeeded outside context of unglue.it try: charge_meta = json.loads(ev_object["description"]) transaction = Transaction.objects.get(id=charge_meta["t.id"]) # now check that account associated with the transaction matches # ev.data.object.id, t.pay_key if ev_object.id == transaction.pay_key: - logger.info("ev_object.id == transaction.pay_key: {0}".format(ev_object.id)) + logger.info("ev_object.id == transaction.pay_key: %s", ev_object.id) else: - logger.warning("ev_object.id {0} <> transaction.pay_key {1}".format(ev_object.id, transaction.pay_key)) - except Exception, e: - logger.warning(e) - + logger.warning("ev_object.id %s != transaction.pay_key %s", + ev_object.id, transaction.pay_key) + except Exception as e: + logger.warning(e) + elif action == 'failed': - # TO DO: delete this logic since we don't do anything but look up transaction. - logger.info("charge.failed webhook for {0}".format(ev_object.get("id"))) + # TO DO: delete this logic since we don't do anything but + # look up transaction. 
+ logger.info("charge.failed webhook for %s", ev_object.get("id")) try: charge_meta = json.loads(ev_object["description"]) transaction = Transaction.objects.get(id=charge_meta["t.id"]) # now check that account associated with the transaction matches # ev.data.object.id, t.pay_key if ev_object.id == transaction.pay_key: - logger.info("ev_object.id == transaction.pay_key: {0}".format(ev_object.id)) + logger.info("ev_object.id == transaction.pay_key: %s", ev_object.id) else: - logger.warning("ev_object.id {0} <> transaction.pay_key {1}".format(ev_object.id, transaction.pay_key)) + logger.warning("ev_object.id %s != transaction.pay_key %s", + ev_object.id, transaction.pay_key) - except Exception, e: + except Exception as e: logger.warning(e) elif action == 'refunded': pass elif action == 'disputed': - pass + pass else: # unexpected pass elif resource == 'customer': if action == 'created': - # test application: email Raymond - # do we have a flag to indicate production vs non-production? -- or does it matter? - # email RY whenever a new Customer created -- we probably want to replace this with some other + # test application: email support + # do we have a flag to indicate production vs non-production? + # -- or does it matter? + # email support whenever a new Customer created + # -- we probably want to replace this with some other # more useful long tem action. 
- send_mail(u"Stripe Customer (id {0}; description: {1}) created".format(ev_object.get("id"), - ev_object.get("description")), - u"Stripe Customer email: {0}".format(ev_object.get("email")), - "notices@gluejar.com", - ["rdhyee@gluejar.com"]) - logger.info("email sent for customer.created for {0}".format(ev_object.get("id"))) + send_mail( + "Stripe Customer (id {0}; description: {1}) created".format( + ev_object.get("id"), + ev_object.get("description")), + "Stripe Customer email: {0}".format(ev_object.get("email")), + "notices@gluejar.com", + ["unglueit@ebookfoundation.org"]) + logger.info("email sent for customer.created for %s", ev_object.get("id")) # handle updated, deleted else: pass else: # other events pass - + return HttpResponse("event_id: {0} event_type: {1}".format(event_id, event_type)) def suite(): - + testcases = [PledgeScenarioTest, StripeErrorTest] #testcases = [StripeErrorTest] - suites = unittest.TestSuite([unittest.TestLoader().loadTestsFromTestCase(testcase) for testcase in testcases]) - #suites.addTest(LibraryThingTest('test_cache')) - #suites.addTest(SettingsTest('test_dev_me_alignment')) # give option to test this alignment + suites = unittest.TestSuite([ + unittest.TestLoader().loadTestsFromTestCase(testcase) for testcase in testcases + ]) + return suites @@ -939,4 +957,4 @@ def suite(): #unittest.main() suites = suite() #suites = unittest.defaultTestLoader.loadTestsFromModule(__import__('__main__')) - unittest.TextTestRunner().run(suites) + unittest.TextTestRunner().run(suites) diff --git a/payment/tasks.py b/payment/tasks.py index 8c8faca41..fa264ce44 100644 --- a/payment/tasks.py +++ b/payment/tasks.py @@ -39,7 +39,7 @@ def update_account_status(all_accounts=True, send_notice_on_change_only=True): for account in accounts_to_calc: try: account.update_status(send_notice_on_change_only=send_notice_on_change_only) - except Exception, e: + except Exception as e: errors.append(e) # fire off notices diff --git a/payment/tests.py 
b/payment/tests.py index c90e34246..8a0fb4cfd 100644 --- a/payment/tests.py +++ b/payment/tests.py @@ -48,7 +48,7 @@ def set_test_logging(): def loginSandbox(selenium): - print "LOGIN SANDBOX" + print("LOGIN SANDBOX") try: selenium.get('https://developer.paypal.com/') @@ -70,9 +70,9 @@ def paySandbox(test, selenium, url, authorize=False, already_at_url=False, sleep if authorize: - print "AUTHORIZE SANDBOX" + print("AUTHORIZE SANDBOX") else: - print "PAY SANDBOX" + print("PAY SANDBOX") try: # We need this sleep to make sure the JS engine is finished from the sandbox loging page @@ -80,7 +80,7 @@ def paySandbox(test, selenium, url, authorize=False, already_at_url=False, sleep if not already_at_url: selenium.get(url) - print "Opened URL %s" % url + print("Opened URL %s" % url) try: # Button is only visible if the login box is NOT open @@ -92,7 +92,7 @@ def paySandbox(test, selenium, url, authorize=False, already_at_url=False, sleep # so selenium can find them, but we need them in view to interact time.sleep(sleep_time) except: - print "Ready for Login" + print("Ready for Login") email_element = WebDriverWait(selenium, 60).until(lambda d : d.find_element_by_id("login_email")) email_element.click() @@ -127,11 +127,11 @@ def paySandbox(test, selenium, url, authorize=False, already_at_url=False, sleep except: traceback.print_exc() - print "Tranasction Complete" + print("Tranasction Complete") def payAmazonSandbox(sel): - print "Expected title: {0} \n Actual Title: {1}".format('Amazon.com Sign In', sel.title) + print("Expected title: {0} \n Actual Title: {1}".format('Amazon.com Sign In', sel.title)) # does it make sense to throw up if there is problem....what better invariants? 
login_email = WebDriverWait(sel,20).until(lambda d: d.find_element_by_css_selector("input#ap_email")) login_email.click() @@ -146,17 +146,10 @@ def payAmazonSandbox(sel): time.sleep(2) # sel.find_element_by_css_selector("input[type='image']") - print "Expected title: {0} \n Actual Title: {1}".format('Amazon Payments', sel.title) - print "looking for credit_card_confirm", sel.current_url + print("Expected title: {0} \n Actual Title: {1}".format('Amazon Payments', sel.title)) + print ("looking for credit_card_confirm", sel.current_url) credit_card_confirm = WebDriverWait(sel,20).until(lambda d: d.find_elements_by_css_selector("input[type='image']")) credit_card_confirm[-1].click() - - #print "looking for payment_confirm", sel.current_url - #payment_confirm = WebDriverWait(sel,20).until(lambda d: d.find_elements_by_css_selector("input[type='image']")) - #print "payment_confirm ", payment_confirm - #print "len(payment_confirm)", len(payment_confirm) - #time.sleep(1) - #payment_confirm[-1].click() @unittest.skip("skipping PledgeTest (selenium)") class PledgeTest(TestCase): @@ -175,8 +168,8 @@ def validateRedirect(self, t, url, count): self.assertNotEqual(url, None) self.assertNotEqual(t, None) self.assertEqual(t.receiver_set.all().count(), count) - self.assertEqual(t.receiver_set.all()[0].amount, t.amount) - self.assertEqual(t.receiver_set.all()[0].currency, t.currency) + self.assertEqual(t.receiver_set.first().amount, t.amount) + self.assertEqual(t.receiver_set.first().currency, t.currency) # self.assertNotEqual(t.ref1Gerence, None) self.assertEqual(t.error, None) self.assertEqual(t.status, IPN_PAY_STATUS_CREATED) @@ -184,8 +177,8 @@ def validateRedirect(self, t, url, count): valid = URLValidator(verify_exists=True) try: valid(url) - except ValidationError, e: - print e + except ValidationError as e: + print(e) def tearDown(self): self.selenium.quit() @@ -213,12 +206,12 @@ def validateRedirect(self, t, url): valid = URLValidator(verify_exists=True) try: valid(url) - 
except ValidationError, e: - print e + except ValidationError as e: + print(e) def test_authorize(self): - print "RUNNING TEST: test_authorize" + print("RUNNING TEST: test_authorize") p = PaymentManager() @@ -306,7 +299,7 @@ def testSimple(self): #test pledge adders user.profile.reset_pledge_badge() - self.assertEqual(user.profile.badges.all()[0].name,'pledger') + self.assertEqual(user.profile.badges.first().name,'pledger') p = PaymentManager() results = p.query_campaign(c,campaign_total=True, summary=False) @@ -406,7 +399,7 @@ def test_status_changes(self): account1 = Account(host='host1', account_id='1', user=user1, status='ACTIVE') account1.save() - user = User.objects.all()[0] + user = User.objects.first() account = user1.profile.account self.assertEqual(account.status, 'ACTIVE') diff --git a/payment/urls.py b/payment/urls.py index 076295ccd..8606e2d03 100644 --- a/payment/urls.py +++ b/payment/urls.py @@ -1,5 +1,5 @@ from django.conf import settings -from django.conf.urls import patterns, url, include +from django.conf.urls import url, include from regluit.payment import views diff --git a/payment/views.py b/payment/views.py index d7c01fe02..3dd1469d9 100644 --- a/payment/views.py +++ b/payment/views.py @@ -14,15 +14,14 @@ from django.conf import settings from django.contrib.auth.models import User from django.contrib.sites.requests import RequestSite -from django.core.urlresolvers import reverse +from django.urls import reverse from django.http import ( HttpResponse, HttpRequest, HttpResponseRedirect, HttpResponseBadRequest ) -from django.shortcuts import render_to_response -from django.template import RequestContext +from django.shortcuts import render from django.test.utils import setup_test_environment from django.utils.timezone import now from django.views.decorators.csrf import csrf_exempt @@ -278,7 +277,7 @@ def checkStatus(request): # https://raw.github.com/agiliq/merchant/master/example/app/views.py def _render(request, template, template_vars={}): - 
return render_to_response(template, template_vars, RequestContext(request)) + return render(request, template, template_vars) class StripeView(FormView): template_name="stripe.html" diff --git a/pyepub/LICENSE b/pyepub/LICENSE deleted file mode 100644 index d1ed6a294..000000000 --- a/pyepub/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2013 Gabriele Alese - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/pyepub/README.md b/pyepub/README.md deleted file mode 100644 index 677b5cbc5..000000000 --- a/pyepub/README.md +++ /dev/null @@ -1,65 +0,0 @@ -pyepub -====== - -An enhanced python library for dealing with EPUB2 files. -Based on latest [py-clave](http://github.com/gabalese/py-clave) development release. - -Installation ------------- - -Grab the [latest stable release](https://github.com/gabalese/pyepub/archive/master.zip). 
Unpack the tarball and execute: - -``` -$ cd pyepub -$ python setup.py install -``` - -This will install the EPUB library in your current python environment as `pyepub`. - -Basic usage ------------ - -The code is as documented as I could. First `import` the EPUB class to use: - -```python -from pyepub import EPUB -``` - -And you're pretty much done. Since pyepub.EPUB inherits largely from zipfile.Zipfile, the inferface is quite familiar. - -For example, you can create a new EPUB to write into using the "w" flag: - -```python -from pyepub import EPUB -epub = EPUB("newfile.epub", "w") -``` - -By default the epub is `open`-ed in read-only mode and exposes json-able dictionary of OPF properties. - -```python ->>> from pyepub import EPUB ->>> epub = EPUB("file.epub") ->>> epub.info -{"metadata":[...], "manifest": [...], "spine": [...], "guide": [...]} -``` - -The EPUB can be opened in append ("a") mode, thus enabling adding content. -Due to the internal nature of zipfile stdlib module, a zipfile can't overwrite its contents. -Thusly, a EPUB opened for append is never overwritten. The `EPUB.__init__` constructor closes the local file and swaps -the reference with a `StringIO` file-like object. To write the final file to disk, you can call the `EPUB.writetodisk()` -method: - -```python ->>> from pyepub import EPUB ->>> epub = EPUB("file.epub","a") ->>> epub.close() # not necessary, since .writetodisk() will close the file for you. ->>> epub.writetodisk("newfile.epub") ->>> epub.filename # the "file" remains available at .filename property, and can be .read() as usual. - -``` - -License -------- - -pyepub is distributed according to the MIT license. I don't like GPL-esque licenses, and I reinvented the wheel (since -there already is a EPUB library in pypi) to avoid involving GPL in my projects. 
diff --git a/pyepub/__init__.py b/pyepub/__init__.py deleted file mode 100644 index 375ce4426..000000000 --- a/pyepub/__init__.py +++ /dev/null @@ -1,418 +0,0 @@ -import zipfile -import os -import re -import uuid -from StringIO import StringIO -import datetime - -try: - import lxml.etree as ET -except ImportError: - import xml.etree.ElementTree as ET - -NAMESPACE = { - "dc": "{http://purl.org/dc/elements/1.1/}", - "opf": "{http://www.idpf.org/2007/opf}", - "ncx": "{http://www.daisy.org/z3986/2005/ncx/}" -} - -ET.register_namespace('dc', "http://purl.org/dc/elements/1.1/") -ET.register_namespace('opf', "http://www.idpf.org/2007/opf") -ET.register_namespace('ncx', "http://www.daisy.org/z3986/2005/ncx/") - - -class InvalidEpub(Exception): - pass - - -class EPUB(zipfile.ZipFile): - """ - EPUB file representation class. - """ - - def __init__(self, filename, mode="r"): - """ - Global Init Switch - - :type filename: str or StringIO() or file like object for read or add - :param filename: File to be processed - :type mode: str - :param mode: "w" or "r", mode to init the zipfile - """ - self._write_files = {} # a dict of files written to the archive - self._delete_files = [] # a list of files to delete from the archive - self.epub_mode = mode - self.writename = None - if mode == "w": - if isinstance(filename, str): - self.writename = open(filename, "w") # on close, we'll overwrite on this file - else: - # filename is already a file like object - self.writename = filename - dummy= StringIO() - zipfile.ZipFile.__init__(self, dummy, mode="w") # fake - self.__init__write() - elif mode == "a": - # we're not going to write to the file until the very end - if isinstance(filename, str): - self.filename = open(filename, "w") # on close, we'll overwrite on this file - else: - # filename is already a file like object - self.filename = filename - self.filename.seek(0) - temp = StringIO() - temp.write(self.filename.read()) - zipfile.ZipFile.__init__(self, self.filename, mode="r") # r 
mode doesn't set the filename - self.__init__read(temp) - else: # retrocompatibility? - zipfile.ZipFile.__init__(self, filename, mode="r") - self.__init__read(filename) - - def __init__read(self, filename): - """ - Constructor to initialize the zipfile in read-only mode - - :type filename: str or StringIO() - :param filename: File to be processed - """ - self.filename = filename - try: - # Read the container - f = self.read("META-INF/container.xml") - except KeyError: - # By specification, there MUST be a container.xml in EPUB - print "The %s file is not a valid OCF." % str(filename) - raise InvalidEpub - try: - # There MUST be a full path attribute on first grandchild... - self.opf_path = ET.fromstring(f)[0][0].get("full-path") - except IndexError: - # ...else the file is invalid. - print "The %s file is not a valid OCF." % str(filename) - raise InvalidEpub - - # NEW: json-able info tree - self.info = {"metadata": {}, - "manifest": [], - "spine": [], - "guide": []} - - self.root_folder = os.path.dirname(self.opf_path) # Used to compose absolute paths for reading in zip archive - self.opf = ET.fromstring(self.read(self.opf_path)) # OPF tree - - ns = re.compile(r'\{.*?\}') # RE to strip {namespace} mess - - # Iterate over section, fill EPUB.info["metadata"] dictionary - for i in self.opf.find("{0}metadata".format(NAMESPACE["opf"])): - tag = ns.sub('', i.tag) - if tag not in self.info["metadata"]: - self.info["metadata"][tag] = i.text or i.attrib - else: - self.info["metadata"][tag] = [self.info["metadata"][tag], i.text or i.attrib] - - # Get id of the cover in - try: - coverid = self.opf.find('.//{0}meta[@name="cover"]'.format(NAMESPACE["opf"])).get("content") - except AttributeError: - # It's a facultative field, after all - coverid = None - self.cover = coverid # This is the manifest ID of the cover - - self.info["manifest"] = [{"id": x.get("id"), # Build a list of manifest items - "href": x.get("href"), - "mimetype": x.get("media-type")} - for x in 
self.opf.find("{0}manifest".format(NAMESPACE["opf"])) if x.get("id")] - - self.info["spine"] = [{"idref": x.get("idref")} # Build a list of spine items - for x in self.opf.find("{0}spine".format(NAMESPACE["opf"])) if x.get("idref")] - try: - self.info["guide"] = [{"href": x.get("href"), # Build a list of guide items - "type": x.get("type"), - "title": x.get("title")} - for x in self.opf.find("{0}guide".format(NAMESPACE["opf"])) if x.get("href")] - except TypeError: # The guide element is optional - self.info["guide"] = None - - # Document identifier - try: - self.id = self.opf.find('.//{0}identifier[@id="{1}"]'.format(NAMESPACE["dc"], - self.opf.get("unique-identifier"))).text - except AttributeError: - raise InvalidEpub("Cannot process an EPUB without unique-identifier attribute of the package element") - # Get and parse the TOC - toc_id = self.opf[2].get("toc") - if toc_id: - expr = ".//{0}item[@id='{1:s}']".format(NAMESPACE["opf"], toc_id) - else: - expr = ".//{0}item[@properties='nav']".format(NAMESPACE["opf"]) - toc_name = self.opf.find(expr).get("href") - self.ncx_path = os.path.join(self.root_folder, toc_name) - self.ncx = ET.fromstring(self.read(self.ncx_path)) - self.contents = [{"name": i[0][0].text or "None", # Build a list of toc elements - "src": os.path.join(self.root_folder, i[1].get("src")), - "id":i.get("id")} - for i in self.ncx.iter("{0}navPoint".format(NAMESPACE["ncx"]))] # The iter method - # loops over nested - - def __init__write(self): - """ - Init an empty EPUB - - """ - self.opf_path = "OEBPS/content.opf" # Define a default folder for contents - self.ncx_path = "OEBPS/toc.ncx" - self.root_folder = "OEBPS" - self.uid = '%s' % uuid.uuid4() - - self.info = {"metadata": {}, - "manifest": [], - "spine": [], - "guide": []} - - self.info["metadata"]["creator"] = "py-clave server" - self.info["metadata"]["title"] = "" - self.info["metadata"]["language"] = "" - - self.opf = ET.fromstring(self._init_opf()) # opf property is always a ElementTree - 
self.ncx = ET.fromstring(self._init_ncx()) # so is ncx. Consistent with self.(opf|ncx) built by __init_read() - - self.writestr(self.opf_path, ET.tostring(self.opf, encoding="UTF-8")) # temporary opf & ncx - self.writestr(self.ncx_path, ET.tostring(self.ncx, encoding="UTF-8")) # will be re-init on close() - - @property - def author(self): - return self.info["metadata"]["creator"] - - @author.setter - def author(self, value): - tmp = self.opf.find(".//{0}creator".format(NAMESPACE["dc"])) - tmp.text = value - self.info["metadata"]["creator"] = value - - @property - def title(self): - return self.info["metadata"]["title"] - - @title.setter - def title(self, value): - tmp = self.opf.find(".//{0}title".format(NAMESPACE["dc"])) - tmp.text = value - ncx_title = self.ncx.find("{http://www.daisy.org/z3986/2005/ncx/}docTitle")[0] - ncx_title.text = value - self.info["metadata"]["title"] = value - - @property - def language(self): - return self.info["metadata"]["language"] - - @language.setter - def language(self, value): - tmp = self.opf.find(".//{0}language".format(NAMESPACE["dc"])) - tmp.text = value - self.info["metadata"]["language"] = value - - def close(self): - if self.fp is None: # Check file status - return - if self.mode == "r": # check file mode - zipfile.ZipFile.close(self) - return - else: - try: - self._safeclose() - zipfile.ZipFile.close(self) # give back control to superclass close method - except RuntimeError: # zipfile.__del__ destructor calls close(), ignore - return - - def _safeclose(self): - """ - Preliminary operations before closing an EPUB - Writes the empty or modified opf-ncx files before closing the zipfile - """ - if self.epub_mode == 'w': - self.writetodisk(self.writename) - else: - self.writetodisk(self.filename) - - def _write_epub_zip(self, epub_zip): - """ - writes the epub to the specified writable zipfile instance - - :type epub_zip: an empty instance of zipfile.Zipfile, mode=w - :param epub_zip: zip file to write - """ - 
epub_zip.writestr('mimetype', "application/epub+zip") # requirement of epub container format - epub_zip.writestr('META-INF/container.xml', self._containerxml()) - epub_zip.writestr(self.opf_path, ET.tostring(self.opf, encoding="UTF-8")) - epub_zip.writestr(self.ncx_path, ET.tostring(self.ncx, encoding="UTF-8")) - paths = ['mimetype','META-INF/container.xml',self.opf_path,self.ncx_path]+ self._write_files.keys() + self._delete_files - if self.epub_mode != 'w': - for item in self.infolist(): - if item.filename not in paths: - epub_zip.writestr(item.filename, self.read(item.filename)) - for key in self._write_files.keys(): - epub_zip.writestr(key, self._write_files[key]) - - def _init_opf(self): - """ - Constructor for empty OPF - :type return: xml.minidom.Document - :return: xml.minidom.Document - """ - today = datetime.date.today() - opf_tmpl = """ - - - {uid} - - - - {date} - - - - - - - - - """ - - doc = opf_tmpl.format(uid=self.uid, date=today) - return doc - - def _init_ncx(self): - """ - Constructor for empty OPF - :type return: xml.minidom.Document - :return: xml.minidom.Document - """ - ncx_tmpl = """ - - - - - - - - - - {title} - - - - """ - - ncx = ncx_tmpl.format(uid=self.uid, title="Default") - return ncx - - def _containerxml(self): - template = """ - - - - - """ - return template % self.opf_path - - def _delete(self, *paths): - """ - Delete archive member - - :type paths: [str] - :param paths: files to be deleted inside EPUB file - """ - for path in paths: - try: - del self._write_files[path] - except KeyError: - pass - self._delete_files.append(path) - - def addmetadata(self, term, value, namespace='dc'): - """ - Add an metadata entry - - :type term: str - :param term: element name/tag for metadata item - :type value: str - :param value: a value - :type namespace: str - :param namespace. 
either a '{URI}' or a registered prefix ('dc', 'opf', 'ncx') are currently built-in - """ - assert self.epub_mode != "r", "%s is not writable" % self - namespace = NAMESPACE.get(namespace,namespace) - element = ET.Element(namespace+term, attrib={}) - element.text = value - self.opf[0].append(element) - # note that info is ignoring namespace entirely - if self.info["metadata"].has_key(term): - self.info["metadata"][term] = [self.info["metadata"][term] , value] - else: - self.info["metadata"][term] = value - - def _writestr(self, filepath, filebytes): - self._write_files[filepath] = filebytes - - def additem(self, fileObject, href, mediatype): - """ - Add a file to manifest only - - :type fileObject: StringIO - :param fileObject: - :type href: str - :param href: - :type mediatype: str - :param mediatype: - """ - assert self.epub_mode != "r", "%s is not writable" % self - element = ET.Element(NAMESPACE.get("opf")+"item", - attrib={"id": "id_"+str(uuid.uuid4())[:5], "href": href, "media-type": mediatype}) - - try: - self._writestr(os.path.join(self.root_folder, element.attrib["href"]), fileObject.getvalue().encode('utf-8')) - except AttributeError: - self._writestr(os.path.join(self.root_folder, element.attrib["href"]), fileObject) - self.opf[1].append(element) - return element.attrib["id"] - - def addpart(self, fileObject, href, mediatype, position=None, reftype="text", linear="yes"): - """ - Add a file as part of the epub file, i.e. to manifest and spine (and guide?) 
- - :param fileObject: file to be inserted - :param href: path inside the epub archive - :param mediatype: mimetype of the fileObject - :type position: int - :param position: order in spine [from 0 to len(opf/manifest))] - :param linear: linear="yes" or "no" - :param reftype: type to assign in guide/reference - """ - assert self.epub_mode != "r", "%s is not writable" % self - fileid = self.additem(fileObject, href, mediatype) - itemref = ET.Element(NAMESPACE.get("opf")+"itemref", attrib={"idref": fileid, "linear": linear}) - reference = ET.Element(NAMESPACE.get("opf")+"reference", attrib={"title": href, "href": href, "type": reftype}) - if position is None or position>len(self.opf[2]): - self.opf[2].append(itemref) - if self.info["guide"]: - self.opf[3].append(reference) - else: - self.opf[2].insert(position, itemref) - if self.info["guide"] and len(self.opf[3]) >= position+1: - self.opf[3].insert(position, reference) - - def writetodisk(self, filename): - """ - Writes the in-memory archive to disk - - :type filename: str - :param filename: name of the file to be writte - """ - - filename.seek(0) - new_zip = zipfile.ZipFile(filename, 'w') - self._write_epub_zip(new_zip) - new_zip.close() diff --git a/pyepub/tests.py b/pyepub/tests.py deleted file mode 100644 index 2ced1da32..000000000 --- a/pyepub/tests.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding=utf-8 -import unittest -import urllib2 -import zipfile -import random -from tempfile import NamedTemporaryFile -from StringIO import StringIO -from . 
import EPUB -try: - import lxml.etree as ET -except ImportError: - import xml.etree.ElementTree as ET - - -class EpubTests(unittest.TestCase): - - def setUp(self): - # get a small epub test file as a file-like object - self.epub2file = NamedTemporaryFile(delete=False) - test_file_content = urllib2.urlopen('http://www.hxa.name/articles/content/EpubGuide-hxa7241.epub') - self.epub2file.write(test_file_content.read()) - self.epub2file.seek(0) - # get an epub with no guide element - self.epub2file2 = NamedTemporaryFile(delete=False) - test_file_content2 = urllib2.urlopen('http://www.gutenberg.org/ebooks/2701.epub.noimages') - self.epub2file2.write(test_file_content2.read()) - self.epub2file2.seek(0) - - - - def test_instantiation(self): - epub=EPUB(self.epub2file) - members = len(epub.namelist()) - self.assertNotEqual(epub.filename, None) - self.assertEqual(len(epub.opf),4) - self.assertEqual(len(epub.opf[0]),11) #metadata items - self.assertEqual(len(epub.opf[1]),11) #manifest items - self.assertEqual(len(epub.opf[2]),8) #spine items - self.assertEqual(len(epub.opf[3]),3) #guide items - # test writing - new_epub=StringIO() - #epub.writetodisk("test_instantiation") - epub.writetodisk(new_epub) - epub=EPUB(new_epub) - self.assertEqual(len(epub.opf),4) - self.assertEqual(members,len(epub.namelist())) - self.assertTrue(zipfile.is_zipfile(new_epub)) - - def test_addpart(self): - epub=EPUB(self.epub2file,mode='a') - members = len(epub.namelist()) - self.assertNotEqual(epub.filename, None) - part = StringIO('') - epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2) - self.assertEqual(len(epub.opf[2]),9) #spine items - # test writing - new_epub=StringIO() - epub.writetodisk(new_epub) - epub=EPUB(new_epub) - self.assertEqual(len(epub.opf[2]),9) - self.assertEqual(members+1,len(epub.namelist())) - #test delete - epub._delete("testpart.xhtml") - new_epub=StringIO() - epub.writetodisk(new_epub) - new_zip = zipfile.ZipFile(new_epub) - 
self.assertEqual(members,len(new_zip.namelist())) - self.assertTrue(zipfile.is_zipfile(new_epub)) - - def test_addpart_noguide(self): - epub2=EPUB(self.epub2file2,mode='a') - self.assertEqual(len(epub2.opf),3) - self.assertEqual(epub2.info['guide'],None) - num_spine_items = len(epub2.opf[2]) - uxml = u'VojtěchVojtíšek' - part = StringIO(unicode(uxml)) - epub2.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2) - self.assertEqual(len(epub2.opf[2]), num_spine_items +1) #spine items - new_epub=StringIO() - epub2.writetodisk(new_epub) - epub2=EPUB(new_epub) - - def test_addmetadata(self): - epub=EPUB(self.epub2file,mode='a') - members = len(epub.namelist()) - epub.addmetadata('test', 'GOOD') - self.assertIn('GOOD<',ET.tostring(epub.opf, encoding="UTF-8")) - self.assertTrue(epub.opf.find('.//{http://purl.org/dc/elements/1.1/}test') is not None) - self.assertEqual(epub.info['metadata']['test'], 'GOOD') - # test writing - new_epub=StringIO() - epub.writetodisk(new_epub) - epub=EPUB(new_epub) - self.assertEqual(epub.info['metadata']['test'], 'GOOD') - new_zip = zipfile.ZipFile(new_epub) - self.assertEqual(members,len(new_zip.namelist())) - self.assertTrue(zipfile.is_zipfile(new_epub)) - - def test_new_epub(self): - f = '%012x.epub' % random.randrange(16**12) #random name - epub=EPUB(f,mode='w') - epub.addmetadata('test', 'GOOD') - uxml = u'VojtěchVojtíšek' - part = StringIO(unicode(uxml)) - epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2) - epub.close() - epub=EPUB(f,mode='r') - self.assertEqual(len(epub.opf),4) - self.assertEqual(len(epub.opf[0]),6) #metadata items - self.assertEqual(len(epub.opf[1]),2) #manifest items - self.assertEqual(len(epub.opf[2]),1) #spine items - self.assertEqual(len(epub.opf[3]),0) #guide items diff --git a/questionnaire/__init__.py b/questionnaire/__init__.py new file mode 100644 index 000000000..862c353e3 --- /dev/null +++ b/questionnaire/__init__.py @@ -0,0 +1 @@ +# this app removes the functionality of the 
fef-questionnaire app while redirecting the user to the book the questionnaire was asking about. \ No newline at end of file diff --git a/questionnaire/admin.py b/questionnaire/admin.py new file mode 100644 index 000000000..5847e3367 --- /dev/null +++ b/questionnaire/admin.py @@ -0,0 +1,18 @@ +from django.contrib import admin +from django.urls import reverse +from .models import Landing + +adminsite = admin.site + + +from django.contrib import admin + +# new in dj1.7 +# @admin.register(Landing) +class LandingAdmin(admin.ModelAdmin): + list_display = ('label', 'content_type', 'object_id', ) + ordering = [ 'object_id', ] + + +adminsite.register(Landing, LandingAdmin) + diff --git a/questionnaire/apps.py b/questionnaire/apps.py new file mode 100644 index 000000000..896c4cec0 --- /dev/null +++ b/questionnaire/apps.py @@ -0,0 +1,3 @@ +# questionnaire/apps.py +from . import __name__ as app_name +from django.apps import AppConfig diff --git a/questionnaire/migrations/0001_initial.py b/questionnaire/migrations/0001_initial.py new file mode 100644 index 000000000..c18696ce1 --- /dev/null +++ b/questionnaire/migrations/0001_initial.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('contenttypes', '0002_remove_content_type_name'), + ] + + operations = [ + migrations.CreateModel( + name='Answer', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('runid', models.CharField(help_text='The RunID (ie. 
year)', max_length=32, verbose_name='RunID')), + ('answer', models.TextField()), + ], + ), + migrations.CreateModel( + name='Choice', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('sortid', models.IntegerField()), + ('value', models.CharField(max_length=64, verbose_name='Short Value')), + ('text_en', models.CharField(max_length=200, null=True, verbose_name='Choice Text', blank=True)), + ('tags', models.CharField(max_length=64, verbose_name='Tags', blank=True)), + ], + ), + migrations.CreateModel( + name='DBStylesheet', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('inclusion_tag', models.CharField(max_length=128)), + ('content', models.TextField()), + ], + ), + migrations.CreateModel( + name='GlobalStyles', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('content', models.TextField()), + ], + ), + migrations.CreateModel( + name='Landing', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('nonce', models.CharField(max_length=32, null=True, blank=True)), + ('object_id', models.PositiveIntegerField(null=True, blank=True)), + ('label', models.CharField(max_length=64, blank=True)), + ('content_type', models.ForeignKey(on_delete=models.CASCADE, related_name='landings', blank=True, to='contenttypes.ContentType', null=True)), + ], + ), + migrations.CreateModel( + name='Question', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('number', models.CharField(help_text=b'eg. 1, 2a, 2b, 3c
    Number is also used for ordering questions.', max_length=8)), + ('sort_id', models.IntegerField(help_text=b'Questions within a questionset are sorted by sort order first, question number second', null=True, blank=True)), + ('text_en', models.TextField(null=True, verbose_name='Text', blank=True)), + ('type', models.CharField(help_text="Determines the means of answering the question. An open question gives the user a single-line textfield, multiple-choice gives the user a number of choices he/she can choose from. If a question is multiple-choice, enter the choices this user can choose from below'.", max_length=32, verbose_name='Type of question', choices=[(b'open', b'Open Answer, single line [input]'), (b'open-textfield', b'Open Answer, multi-line [textarea]'), (b'choice-yesno', b'Yes/No Choice [radio]'), (b'choice-yesnocomment', b'Yes/No Choice with optional comment [radio, input]'), (b'choice-yesnodontknow', b"Yes/No/Don't know Choice [radio]"), (b'choice-yesno-optional', b'Optional Yes/No Choice [radio]'), (b'choice-yesnocomment-optional', b'Optional Yes/No Choice with optional comment [radio, input]'), (b'choice-yesnodontknow-optional', b"Optional Yes/No/Don't know Choice [radio]"), (b'comment', b'Comment Only'), (b'choice', b'Choice [radio]'), (b'choice-freeform', b'Choice with a freeform option [radio]'), (b'choice-optional', b'Optional choice [radio]'), (b'choice-freeform-optional', b'Optional choice with a freeform option [radio]'), (b'dropdown', b'Dropdown choice [select]'), (b'choice-multiple', b'Multiple-Choice, Multiple-Answers [checkbox]'), (b'choice-multiple-freeform', b'Multiple-Choice, Multiple-Answers, plus freeform [checkbox, input]'), (b'choice-multiple-values', b'Multiple-Choice, Multiple-Answers [checkboxes], plus value box [input] for each selected answer'), (b'range', b'Range of numbers [select]'), (b'number', b'Number [input]'), (b'timeperiod', b'Time Period [input, select]'), (b'custom', b'Custom field'), (b'sameas', b'Same as Another 
Question (put sameas=question.number in checks or sameasid=question.id)')])), + ('extra_en', models.CharField(help_text='Extra information (use on question type)', max_length=512, null=True, verbose_name='Extra information', blank=True)), + ('checks', models.CharField(help_text=b'Additional checks to be performed for this value (space separated)

    For text fields, required is a valid check.
    For yes/no choice, required, required-yes, and required-no are valid.

    If this question is required only if another question\'s answer is something specific, use requiredif="QuestionNumber,Value" or requiredif="QuestionNumber,!Value" for anything but a specific value. You may also combine tests appearing in requiredif by joining them with the words and or or, eg. requiredif="Q1,A or Q2,B"', max_length=512, null=True, verbose_name='Additional checks', blank=True)), + ('footer_en', models.TextField(help_text=b'Footer rendered below the question', null=True, verbose_name='Footer', blank=True)), + ('parse_html', models.BooleanField(default=False, verbose_name=b'Render html in Footer?')), + ], + ), + migrations.CreateModel( + name='Questionnaire', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('name', models.CharField(max_length=128)), + ('redirect_url', models.CharField(default=b'', help_text=b"URL to redirect to when Questionnaire is complete. Macros: $SUBJECTID, $RUNID, $LANG. Leave blank to render the 'complete.$LANG.html' template.", max_length=128, blank=True)), + ('html', models.TextField(verbose_name='Html', blank=True)), + ('parse_html', models.BooleanField(default=False, verbose_name=b'Render html instead of name for questionnaire?')), + ('admin_access_only', models.BooleanField(default=False, verbose_name=b'Only allow access to logged in users? 
(This allows entering paper questionnaires without allowing new external submissions)')), + ], + options={ + 'permissions': (('export', 'Can export questionnaire answers'), ('management', 'Management Tools')), + }, + ), + migrations.CreateModel( + name='QuestionSet', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('sortid', models.IntegerField()), + ('heading', models.CharField(max_length=64)), + ('checks', models.CharField(help_text=b'Current options are \'femaleonly\' or \'maleonly\' and shownif="QuestionNumber,Answer" which takes the same format as requiredif for questions.', max_length=256, blank=True)), + ('text_en', models.TextField(help_text=b'HTML or Text', null=True, verbose_name='Text', blank=True)), + ('parse_html', models.BooleanField(default=False, verbose_name=b'Render html in heading?')), + ('questionnaire', models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.Questionnaire')), + ], + ), + migrations.CreateModel( + name='RunInfo', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('random', models.CharField(max_length=32)), + ('runid', models.CharField(max_length=32)), + ('emailcount', models.IntegerField(default=0)), + ('created', models.DateTimeField(auto_now_add=True)), + ('emailsent', models.DateTimeField(null=True, blank=True)), + ('lastemailerror', models.CharField(max_length=64, null=True, blank=True)), + ('state', models.CharField(max_length=16, null=True, blank=True)), + ('cookies', models.TextField(null=True, blank=True)), + ('tags', models.TextField(help_text='Tags active on this run, separated by commas', blank=True)), + ('skipped', models.TextField(help_text='A comma sepearted list of questions to skip', blank=True)), + ('landing', models.ForeignKey(on_delete=models.CASCADE, blank=True, to='questionnaire.Landing', null=True)), + ('questionset', models.ForeignKey(on_delete=models.CASCADE, blank=True, 
to='questionnaire.QuestionSet', null=True)), + ], + options={ + 'verbose_name_plural': 'Run Info', + }, + ), + migrations.CreateModel( + name='RunInfoHistory', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('runid', models.CharField(max_length=32)), + ('completed', models.DateTimeField()), + ('tags', models.TextField(help_text='Tags used on this run, separated by commas', blank=True)), + ('skipped', models.TextField(help_text='A comma sepearted list of questions skipped by this run', blank=True)), + ('landing', models.ForeignKey(on_delete=models.CASCADE, blank=True, to='questionnaire.Landing', null=True)), + ('questionnaire', models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.Questionnaire')), + ], + options={ + 'verbose_name_plural': 'Run Info History', + }, + ), + migrations.CreateModel( + name='Subject', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('state', models.CharField(default=b'inactive', max_length=16, verbose_name='State', choices=[(b'active', 'Active'), (b'inactive', 'Inactive')])), + ('anonymous', models.BooleanField(default=False)), + ('ip_address', models.GenericIPAddressField(null=True, blank=True)), + ('surname', models.CharField(max_length=64, null=True, verbose_name='Surname', blank=True)), + ('givenname', models.CharField(max_length=64, null=True, verbose_name='Given name', blank=True)), + ('email', models.EmailField(max_length=254, null=True, verbose_name='Email', blank=True)), + ('gender', models.CharField(default=b'unset', max_length=8, verbose_name='Gender', blank=True, choices=[(b'unset', 'Unset'), (b'male', 'Male'), (b'female', 'Female')])), + ('nextrun', models.DateField(null=True, verbose_name='Next Run', blank=True)), + ('formtype', models.CharField(default=b'email', max_length=16, verbose_name='Form Type', choices=[(b'email', 'Subject receives emails'), (b'paperform', 'Subject is sent 
paper form')])), + ('language', models.CharField(default=b'en-us', max_length=5, verbose_name='Language', choices=[(b'en', b'English')])), + ], + ), + migrations.AlterIndexTogether( + name='subject', + index_together=set([('givenname', 'surname')]), + ), + migrations.AddField( + model_name='runinfohistory', + name='subject', + field=models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.Subject'), + ), + migrations.AddField( + model_name='runinfo', + name='subject', + field=models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.Subject'), + ), + migrations.AddField( + model_name='question', + name='questionset', + field=models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.QuestionSet'), + ), + migrations.AddField( + model_name='landing', + name='questionnaire', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='landings', blank=True, to='questionnaire.Questionnaire', null=True), + ), + migrations.AddField( + model_name='choice', + name='question', + field=models.ForeignKey(on_delete=models.CASCADE, to='questionnaire.Question'), + ), + migrations.AddField( + model_name='answer', + name='question', + field=models.ForeignKey(on_delete=models.CASCADE, help_text='The question that this is an answer to', to='questionnaire.Question'), + ), + migrations.AddField( + model_name='answer', + name='subject', + field=models.ForeignKey(on_delete=models.CASCADE, help_text='The user who supplied this answer', to='questionnaire.Subject'), + ), + migrations.AlterIndexTogether( + name='runinfo', + index_together=set([('random',)]), + ), + migrations.AlterIndexTogether( + name='questionset', + index_together=set([('questionnaire', 'sortid'), ('sortid',)]), + ), + migrations.AlterIndexTogether( + name='question', + index_together=set([('number', 'questionset')]), + ), + migrations.AlterIndexTogether( + name='choice', + index_together=set([('value',)]), + ), + migrations.AlterIndexTogether( + name='answer', + index_together=set([('subject', 'runid', 
'id'), ('subject', 'runid')]), + ), + ] diff --git a/questionnaire/migrations/0002_auto_20160929_1320.py b/questionnaire/migrations/0002_auto_20160929_1320.py new file mode 100644 index 000000000..ba1889277 --- /dev/null +++ b/questionnaire/migrations/0002_auto_20160929_1320.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('questionnaire', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='Run', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('runid', models.CharField(max_length=32, null=True)), + ], + ), + migrations.AddField( + model_name='answer', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='answers', to='questionnaire.Run', null=True), + ), + migrations.AddField( + model_name='runinfo', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_infos', to='questionnaire.Run', null=True), + ), + migrations.AddField( + model_name='runinfohistory', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_info_histories', to='questionnaire.Run', null=True), + ), + ] diff --git a/questionnaire/migrations/0003_auto_20160929_1321.py b/questionnaire/migrations/0003_auto_20160929_1321.py new file mode 100644 index 000000000..73fb23bd7 --- /dev/null +++ b/questionnaire/migrations/0003_auto_20160929_1321.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +def models_to_migrate(apps): + return [ + apps.get_model('questionnaire', 'RunInfo'), + apps.get_model('questionnaire', 'RunInfoHistory'), + apps.get_model('questionnaire', 'Answer'), + ] + +class Migration(migrations.Migration): + + def move_runids(apps, schema_editor): + Run = apps.get_model('questionnaire', 'Run') + 
for model in models_to_migrate(apps): + for instance in model.objects.all(): + (run, created) = Run.objects.get_or_create(runid=instance.runid) + instance.run = run + instance.save() + + def unmove_runids(apps, schema_editor): + for model in models_to_migrate(apps): + for instance in model.objects.all(): + instance.runid = instance.run.runid + instance.save() + + dependencies = [ + ('questionnaire', '0002_auto_20160929_1320'), + ] + + operations = [ + migrations.RunPython(move_runids, reverse_code=unmove_runids, hints={'questionnaire': 'Run'}), + ] diff --git a/questionnaire/migrations/0004_auto_20160929_1800.py b/questionnaire/migrations/0004_auto_20160929_1800.py new file mode 100644 index 000000000..7c3ad44bd --- /dev/null +++ b/questionnaire/migrations/0004_auto_20160929_1800.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('questionnaire', '0003_auto_20160929_1321'), + ] + + operations = [ + migrations.RemoveField( + model_name='runinfo', + name='runid', + ), + migrations.RemoveField( + model_name='runinfohistory', + name='runid', + ), + migrations.AlterField( + model_name='answer', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='answers', default=1, to='questionnaire.Run'), + preserve_default=False, + ), + migrations.AlterField( + model_name='runinfo', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_infos', default=1, to='questionnaire.Run'), + preserve_default=False, + ), + migrations.AlterField( + model_name='runinfohistory', + name='run', + field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_info_histories', to='questionnaire.Run'), + ), + migrations.AlterIndexTogether( + name='answer', + index_together=set([('subject', 'run'), ('subject', 'run', 'id')]), + ), + migrations.RemoveField( + model_name='answer', + name='runid', + 
), + ] diff --git a/questionnaire/migrations/0005_auto_20230412_1421.py b/questionnaire/migrations/0005_auto_20230412_1421.py new file mode 100644 index 000000000..5bb7fa030 --- /dev/null +++ b/questionnaire/migrations/0005_auto_20230412_1421.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.29 on 2023-04-12 14:21 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('questionnaire', '0004_auto_20160929_1800'), + ] + + operations = [ + migrations.AlterIndexTogether( + name='answer', + index_together=set([]), + ), + migrations.RemoveField( + model_name='answer', + name='question', + ), + migrations.RemoveField( + model_name='answer', + name='run', + ), + migrations.RemoveField( + model_name='answer', + name='subject', + ), + migrations.AlterIndexTogether( + name='choice', + index_together=set([]), + ), + migrations.RemoveField( + model_name='choice', + name='question', + ), + migrations.DeleteModel( + name='DBStylesheet', + ), + migrations.DeleteModel( + name='GlobalStyles', + ), + migrations.AlterIndexTogether( + name='question', + index_together=set([]), + ), + migrations.RemoveField( + model_name='question', + name='questionset', + ), + migrations.AlterIndexTogether( + name='questionset', + index_together=set([]), + ), + migrations.RemoveField( + model_name='questionset', + name='questionnaire', + ), + migrations.AlterIndexTogether( + name='runinfo', + index_together=set([]), + ), + migrations.RemoveField( + model_name='runinfo', + name='landing', + ), + migrations.RemoveField( + model_name='runinfo', + name='questionset', + ), + migrations.RemoveField( + model_name='runinfo', + name='run', + ), + migrations.RemoveField( + model_name='runinfo', + name='subject', + ), + migrations.RemoveField( + model_name='runinfohistory', + name='landing', + ), + migrations.RemoveField( + model_name='runinfohistory', + name='questionnaire', + ), + 
migrations.RemoveField( + model_name='runinfohistory', + name='run', + ), + migrations.RemoveField( + model_name='runinfohistory', + name='subject', + ), + migrations.DeleteModel( + name='Subject', + ), + migrations.RemoveField( + model_name='landing', + name='questionnaire', + ), + migrations.DeleteModel( + name='Answer', + ), + migrations.DeleteModel( + name='Choice', + ), + migrations.DeleteModel( + name='Question', + ), + migrations.DeleteModel( + name='Questionnaire', + ), + migrations.DeleteModel( + name='QuestionSet', + ), + migrations.DeleteModel( + name='Run', + ), + migrations.DeleteModel( + name='RunInfo', + ), + migrations.DeleteModel( + name='RunInfoHistory', + ), + ] diff --git a/logs/.keep b/questionnaire/migrations/__init__.py similarity index 100% rename from logs/.keep rename to questionnaire/migrations/__init__.py diff --git a/questionnaire/models.py b/questionnaire/models.py new file mode 100644 index 000000000..2f339c902 --- /dev/null +++ b/questionnaire/models.py @@ -0,0 +1,40 @@ +import hashlib +import json +import re +import uuid +from datetime import datetime +from six import text_type as unicodestr + +from django.conf import settings +from django.contrib.contenttypes.fields import GenericForeignKey +from django.contrib.contenttypes.models import ContentType +from django.db import models +from django.db.models.signals import post_save +from django.utils.translation import ugettext_lazy as _ +from django.urls import reverse + + +_numre = re.compile("(\d+)([a-z]+)", re.I) + + +class Landing(models.Model): + # defines an entry point to a Feedback session + nonce = models.CharField(max_length=32, null=True,blank=True) + content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True,blank=True, related_name='landings') + object_id = models.PositiveIntegerField(null=True,blank=True) + content_object = GenericForeignKey('content_type', 'object_id') + label = models.CharField(max_length=64, blank=True) + def _hash(self): + return 
uuid.uuid4().hex + + def __str__(self): + return self.label + + def url(self): + try: + return settings.BASE_URL_SECURE + reverse('landing', args=[self.nonce]) + except AttributeError: + # not using sites + return reverse('landing', args=[self.nonce]) + + diff --git a/questionnaire/urls.py b/questionnaire/urls.py new file mode 100644 index 000000000..8b77f188e --- /dev/null +++ b/questionnaire/urls.py @@ -0,0 +1,11 @@ +# vim: set fileencoding=utf-8 + +from django.conf.urls import * +from django.conf import settings +from .views import to_model + + +urlpatterns = [ + url(r'^landing/(?P\w+)/$', to_model, name="landing"), +] + diff --git a/questionnaire/views.py b/questionnaire/views.py new file mode 100644 index 000000000..42c3190f9 --- /dev/null +++ b/questionnaire/views.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +# vim: set fileencoding=utf-8 + +import logging + + + +from django.http import HttpResponse, HttpResponseRedirect +from django.urls import reverse +from django.shortcuts import get_object_or_404 + +from .models import Landing + + +def to_model(request, **kwargs): + nonce = kwargs['nonce'] + landing = get_object_or_404(Landing, nonce=nonce) + if landing.content_object: + work_id = str(landing.content_object.id) + return HttpResponseRedirect(reverse('work', kwargs={'work_id': work_id})) + else: + return HttpResponseRedirect('/') + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..a8301fa66 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,116 @@ +-i https://pypi.org/simple +amqp==2.6.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +appdirs==1.4.4 +arabic-reshaper==3.0.0 +asn1crypto==1.5.1 +beautifulsoup4==4.11.1; python_full_version >= '3.6.0' +billiard==3.6.4.0 +boto3==1.17.91; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' +botocore==1.20.112; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' +cachetools==5.3.2; python_version >= 
'3.7' +cairocffi==0.8.0 +celery==4.4.7; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +certifi==2023.11.17; python_version >= '3.6' +cffi==1.16.0; python_version >= '3.8' +chardet==5.2.0; python_version >= '3.7' +charset-normalizer==3.3.2; python_full_version >= '3.7.0' +click==8.1.7; python_version >= '3.7' +colorama==0.4.6; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6' +confusable-homoglyphs==3.2.0 +cryptography==41.0.7; python_version >= '3.7' +cssselect2==0.7.0; python_version >= '3.7' +defusedxml==0.8.0rc2; python_version >= '3.6' +distlib==0.3.8 +django==1.11.29 +django-ckeditor==5.6.1 +django-contrib-comments==2.0.0 +django-el-pagination==3.2.4 +-e git+https://github.com/eshellman/django-email-change.git@fb063296cbf4e4a6d8a93d34d98fe0c7739c2e0d#egg=django-email-change +django-extensions==3.1.1; python_version >= '3.6' +django-js-asset==1.2.3 +django-jsonfield==1.0.0 +django-mptt==0.8.6 +-e git+https://github.com/eshellman/django-notification.git@1ad2be4adf3551a3471d923380368341452e178a#egg=django-notification +django-registration==2.4.1 +django-sass-processor==0.8.2 +django-selectable==1.1.0 +django-storages==1.5.2 +django-tastypie==0.14.1 +docopt==0.6.2 +filelock==3.13.1; python_version >= '3.8' +gitberg==0.8.7 +gitdb==4.0.11; python_version >= '3.7' +github3.py==4.0.1; python_version >= '3.7' +gitpython==3.1.40; python_version >= '3.7' +html5lib==1.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +idna==3.6; python_version >= '3.5' +isodate==0.6.1 +jinja2==3.1.2; python_version >= '3.7' +jmespath==0.10.0; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' +kombu==4.6.11; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +lxml==4.9.4; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +mailchimp3==3.0.14 +markupsafe==2.1.3; python_version >= '3.7' +mechanize==0.4.5 +mock==2.0.0 
+mysqlclient==1.4.6 +oauthlib==3.2.2; python_version >= '3.6' +oscrypto==1.3.0 +packaging==23.2; python_version >= '3.7' +pbr==6.0.0; python_version >= '2.6' +pillow==9.5.0; python_version >= '3.8' +platformdirs==4.1.0; python_version >= '3.8' +pluggy==1.3.0; python_version >= '3.8' +pycparser==2.21 +pyepub==0.5.0 +pyhanko==0.21.0; python_version >= '3.8' +pyhanko-certvalidator==0.26.3; python_version >= '3.7' +pyjwt[crypto]==2.8.0; python_version >= '3.7' +pymarc==4.2.1; python_version >= '3.6' +pyoai==2.5.0 +pyopenssl==23.3.0; python_version >= '3.7' +pyparsing==3.1.1; python_full_version >= '3.6.8' +pypdf>=5.0.0 +pypng==0.20220715.0 +pyproject-api==1.6.1; python_version >= '3.8' +python-bidi==0.4.2 +python-dateutil==2.8.2; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +python-mimeparse==1.6.0 +python3-openid==3.2.0 +pytz==2023.3.post1 +pyyaml==6.0.1; python_version >= '3.6' +qrcode==7.4.2; python_version >= '3.7' +rdflib==7.0.0; python_full_version >= '3.8.1' and python_full_version < '4.0.0' +redis==3.5.3; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' +reportlab==4.0.8; python_version >= '3.7' and python_version < '4' +requests>=2.31.0; python_version >= '3.7' +requests-mock==1.8.0 +requests-oauthlib==1.3.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +risparser==0.4.3 +s3transfer==0.4.2 +selenium==3.141.0 +semver==2.2.0 +sh==2.0.6; python_version < '4.0' and python_full_version >= '3.8.1' +six==1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +smmap==5.0.1; python_version >= '3.7' +social-auth-app-django==2.1.0 +social-auth-core==4.5.1; python_version >= '3.8' +sorl-thumbnail==12.6.3; python_version >= '3.4' +soupsieve==2.5; python_version >= '3.8' +sparqlwrapper==2.0.0; python_version >= '3.7' +stripe==2.76.0 +svglib==1.5.1; python_version >= '3.7' +tinycss2==1.2.1; python_version >= '3.7' +tomli==2.0.1; python_version < '3.11' +tox==4.11.4; 
python_version >= '3.8' +tqdm==4.66.1; python_version >= '3.7' +typing-extensions==4.9.0; python_version >= '3.8' +tzlocal==5.2; python_version >= '3.8' +uritemplate==4.1.1; python_version >= '3.6' +uritools==4.0.2; python_version >= '3.7' +urllib3==1.26.18; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' +vine==1.3.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +virtualenv==20.25.0; python_version >= '3.7' +webencodings==0.5.1 +wikipedia==1.4.0 +xhtml2pdf==0.2.15 diff --git a/requirements_versioned.pip b/requirements_versioned.pip deleted file mode 100644 index f1e0f02db..000000000 --- a/requirements_versioned.pip +++ /dev/null @@ -1,107 +0,0 @@ -Django==1.8.14 -Fabric==1.6.0 -MySQL-python==1.2.5 -Pillow==3.4.2 -PyJWT==1.4.1 -PyPDF2==1.26 -PyGithub==1.15.0 -PyYAML==3.11 -amqp==1.4.9 -anyjson==0.3.3 -billiard==3.3.0.23 -awscli==1.15.33 -boto==2.48.0 -boto3==1.7.33 -celery==3.1.23 -certifi==2018.4.16 -chardet==3.0.4 -# pip installing pillow seems to delete distribute -# but having distribute in requirements starting to cause problems -# distribute==0.6.28 -django-celery==3.1.17 -django-ckeditor==4.5.1 -#django-email-change==0.2.3 -git+git://github.com/eshellman/django-email-change.git@57169bdef1c8a41d122e2bab2dcd8564b8fb231d -django-compat==1.0.10 -django-contrib-comments==1.7.1 -django-el-pagination==3.2.4 -django-extensions==1.6.1 -django-jsonfield==1.0.0 -#django-kombu==0.9.4 -django-maintenancemode==0.11.2 -django-mptt==0.8.5 -#django-notification==0.2 -git+git://github.com/eshellman/django-notification.git@a4620e893e2da220994e0189bf5d980bfbdcf0ad -django-registration==2.1.2 -django-selectable==0.9.0 -django-smtp-ssl==1.0 -django-storages==1.5.2 -django-tastypie==0.13.3 -#django-transmeta==0.7.3 -git+git://github.com/resulto/django-transmeta.git@ad4d7278ba330dcf8c8446f8ae9b2c769ae8684e -fef-questionnaire==4.0.1 -#gitenberg.metadata==0.1.6 -git+https://github.com/gitenberg-dev/gitberg-build 
-#git+ssh://git@github.com/gitenberg-dev/metadata.git@0.1.11 -github3.py==0.9.5 -html5lib==1.0.1 -httplib2==0.7.5 -isodate==0.5.1 -kombu==3.0.35 -lxml==4.2.1 -defusedxml==0.4.1 -mechanize==0.2.5 -mimeparse==0.1.3 -nose==1.1.2 -numpy==1.11.2 -oauth2==1.5.211 -oauthlib==1.1.2 -pandas==0.19.1 -paramiko==1.14.1 -mailchimp3==3.0.4 -pycrypto==2.6 -pymarc==3.0.2 -pyoai==2.5.0 -pyparsing==2.0.3 -python-dateutil==2.5.3 -python-mimeparse==0.1.4 -python-openid==2.2.5 -python-social-auth==0.2.21 -pytz==2016.6.1 -rdflib==4.2.0 -rdflib-jsonld==0.3 -redis==2.10.3 -reportlab==3.4.0 -requests==2.18.4 -requests-mock==1.2.0 -requests-oauthlib==0.6.2 -selenium==2.53.1 -six==1.11.0 -sorl-thumbnail==12.3 -ssh==1.7.14 -stevedore==1.12.0 -stripe==1.9.1 -virtualenv==1.4.9 -# virtualenv-clone==0.2.4 not sure why I have this in my env -#virtualenvwrapper==3.6 -wsgiref==0.1.2 -xhtml2pdf==0.2.2 -webencodings==0.5.1 -#for urllib3 secure -cffi==1.7.0 -cryptography==2.2.2 -enum34==1.1.6 -idna==2.6 -ipaddress==1.0.16 -ndg-httpsclient==0.4.2 -pyOpenSSL==18.0.0 -pyasn1==0.1.9 -pycparser==2.14 -setuptools==25.0.0 -urllib3==1.22 -beautifulsoup4==4.6.0 -RISparser==0.4.2 -# include these 2 for development -#libsass==0.13.4 -#django-compressor==2.2 -django-sass-processor==0.5.6 diff --git a/settings/common.py b/settings/common.py index 5d746c373..c9989cda7 100644 --- a/settings/common.py +++ b/settings/common.py @@ -7,6 +7,9 @@ import regluit from regluit.payment.parameters import PAYMENT_HOST_PAYPAL, PAYMENT_HOST_AMAZON +from regluit.utils import custom_logging +import logging.handlers +logging.handlers.GroupWriteRotatingFileHandler = custom_logging.GroupWriteRotatingFileHandler PROJECT_DIR = dirname(dirname(realpath(__file__))) @@ -15,6 +18,7 @@ ('en', 'English'), ) LOCAL_TEST = False +TEST_PLATFORM = 'production' TESTING = sys.argv[1:2] == ['test'] # detect if we're running tests (used to turn off a repair migration) ALLOWED_HOSTS = ['.unglue.it', '.unglueit.com',] @@ -41,7 +45,8 @@ # set once instead 
of in all the templates JQUERY_HOME = "/static/js/jquery-1.12.4.min.js" -JQUERY_UI_HOME = "/static/js/jquery-ui-1.8.16.custom.min.js" +JQUERY_UI_HOME = "/static/js/jquery-ui-1.11.4.min.js" +JQUERY_UI_THEME = "/static/css/ui-lightness/jquery-ui-1.11.4.min.css" CKEDITOR_UPLOAD_PATH = '' CKEDITOR_RESTRICT_BY_USER = True @@ -100,7 +105,6 @@ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [join(PROJECT_DIR, "frontend", "templates"), join(PROJECT_DIR, "frontend", "templates", "registration"), - join(PROJECT_DIR, "frontend", "questionnaire"), ], 'OPTIONS':{ 'context_processors':[ @@ -124,23 +128,21 @@ ] -MIDDLEWARE_CLASSES = ( +MIDDLEWARE = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', - 'maintenancemode.middleware.MaintenanceModeMiddleware', 'regluit.libraryauth.auth.SocialAuthExceptionMiddlewareWithoutMessages', 'django.middleware.locale.LocaleMiddleware', - 'questionnaire.request_cache.RequestCacheMiddleware', ) ROOT_URLCONF = 'regluit.urls' INSTALLED_APPS = ( 'django.contrib.auth', - 'django.contrib.contenttypes', + 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.sitemaps', @@ -157,45 +159,34 @@ 'regluit.payment', 'regluit.utils', 'registration', - 'social.apps.django_app.default', + 'social_django', 'tastypie', - 'djcelery', 'el_pagination', 'selectable', 'regluit.frontend.templatetags', 'notification', 'email_change', 'ckeditor', - 'storages', + 'ckeditor_uploader', + 'storages', 'sorl.thumbnail', - 'mptt', - # this must appear *after* django.frontend or else it overrides the + 'mptt', + # this must appear *after* django.frontend or else it overrides the # registration templates in frontend/templates/registration 'django.contrib.admin', - 'regluit.distro', + 'regluit.distro', 
'regluit.booxtream', - 'regluit.pyepub', - 'regluit.libraryauth', - 'transmeta', + 'pyepub', + 'regluit.libraryauth', 'questionnaire', - 'questionnaire.page', 'sass_processor', ) SASS_PROCESSOR_INCLUDE_DIRS = [ os.path.join(PROJECT_DIR, 'static', 'scss'), - os.path.join('static', 'scss'), - os.path.join(PROJECT_DIR, 'static', 'scss', 'foundation', 'scss'), - os.path.join('static', 'scss', 'foundation', 'scss'), - # static/scss/foundation/scss/foundation.scss ] SASS_PROCESSOR_AUTO_INCLUDE = False -# A sample logging configuration. The only tangible logging -# performed by this configuration is to send an email to -# the site admins on every HTTP 500 error. -# See http://docs.djangoproject.com/en/dev/topics/logging for -# more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': True, @@ -203,6 +194,13 @@ 'brief': { 'format': '%(asctime)s %(levelname)s %(name)s[%(funcName)s]: %(message)s', }, + 'django.server': { + '()': 'django.utils.log.ServerFormatter', + 'format': '[%(server_time)s] %(message)s', + }, + 'dl': { + 'format': '%(asctime)s : %(message)s', + }, }, 'filters': { 'require_debug_false': { @@ -217,12 +215,25 @@ }, 'file': { 'level': 'INFO', - 'class': 'logging.handlers.RotatingFileHandler', + 'class': 'logging.handlers.GroupWriteRotatingFileHandler', 'filename': join(PROJECT_DIR, 'logs', 'unglue.it.log'), 'maxBytes': 1024*1024*5, # 5 MB 'backupCount': 5, 'formatter': 'brief', }, + 'django.server': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'django.server', + }, + 'downloads': { + 'level': 'INFO', + 'class': 'logging.handlers.GroupWriteRotatingFileHandler', + 'filename': join(PROJECT_DIR, 'logs', 'downloads.log'), + 'maxBytes': 1024*1024*10, # 10 MB + 'backupCount': 5, + 'formatter': 'dl', + }, }, 'loggers': { 'django.request': { @@ -230,10 +241,31 @@ 'level': 'ERROR', 'propagate': True, }, + 'django.server': { + 'handlers': ['django.server'], + 'level': 'INFO', + 
'propagate': False, + }, + 'regluit.downloads': { + 'handlers': ['downloads'], + 'propagate': False, + }, '': { 'handlers': ['file'], 'level': 'INFO', - } + }, +# uncomment to do SQL logging +# 'django.db.backends': { +# 'level': 'DEBUG', +# 'handlers': ['file'], +# }, + }, +} + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': 'regluit', } } @@ -247,18 +279,12 @@ # django-socialauth AUTHENTICATION_BACKENDS = ( - 'social.backends.google.GoogleOAuth2', - 'social.backends.twitter.TwitterOAuth', - 'social.backends.yahoo.YahooOpenId', - 'social.backends.facebook.FacebookOAuth2', - 'social.backends.open_id.OpenIdAuth', + 'social_core.backends.google.GoogleOAuth2', + 'social_core.backends.open_id.OpenIdAuth', 'django.contrib.auth.backends.ModelBackend', ) -SOCIAL_AUTH_ENABLED_BACKENDS = ['google', 'facebook', 'twitter'] -#SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/' -FACEBOOK_SOCIAL_AUTH_BACKEND_ERROR_URL = '/' SOCIAL_AUTH_SLUGIFY_USERNAMES = True SOCIAL_AUTH_NONCE_SERVER_URL_LENGTH = 200 SOCIAL_AUTH_ASSOCIATION_SERVER_URL_LENGTH = 135 @@ -269,50 +295,50 @@ # format to create the user instance later. On some cases the details are # already part of the auth response from the provider, but sometimes this # could hit a provider API. - 'social.pipeline.social_auth.social_details', + 'social_core.pipeline.social_auth.social_details', # Get the social uid from whichever service we're authing thru. The uid is # the unique identifier of the given user in the provider. - 'social.pipeline.social_auth.social_uid', + 'social_core.pipeline.social_auth.social_uid', # Verifies that the current auth process is valid within the current # project, this is were emails and domains whitelists are applied (if # defined). - 'social.pipeline.social_auth.auth_allowed', + 'social_core.pipeline.social_auth.auth_allowed', # Checks if the current social-account is already associated in the site. 
'regluit.libraryauth.auth.selective_social_user', # Make up a username for this person, appends a random string at the end if # there's any collision. - 'social.pipeline.user.get_username', - + 'social_core.pipeline.user.get_username', + # make username < 222 in length 'regluit.libraryauth.auth.chop_username', - + # Send a validation email to the user to verify its email address. # Disabled by default. - # 'social.pipeline.mail.mail_validation', - + # 'social_core.pipeline.mail.mail_validation', + # Associates the current social details with another user account with - # a similar email address. don't use twitter or facebook to log in + # a similar email address. 'regluit.libraryauth.auth.selectively_associate_by_email', # Create a user account if we haven't found one yet. - 'social.pipeline.user.create_user', + 'social_core.pipeline.user.create_user', # Create the record that associated the social account with this user. - 'social.pipeline.social_auth.associate_user', - + 'social_core.pipeline.social_auth.associate_user', + # Populate the extra_data field in the social record with the values # specified by settings (and the default ones like access_token, etc). - 'social.pipeline.social_auth.load_extra_data', + 'social_core.pipeline.social_auth.load_extra_data', # add extra data to user profile 'regluit.libraryauth.auth.deliver_extra_data', # Update the user record with any changed info from the auth service. 
- 'social.pipeline.user.user_details' + 'social_core.pipeline.user.user_details' ) SOCIAL_AUTH_TWITTER_EXTRA_DATA = [('profile_image_url_https', 'profile_image_url_https'),('screen_name','screen_name')] @@ -322,9 +348,9 @@ LOGOUT_URL = "/accounts/logout/" LOGIN_ERROR_URL = '/accounts/login-error/' -USER_AGENT = "unglue.it.bot v0.0.1 " +USER_AGENT = "unglue.it.bot v0.0.1 (https://unglue.it)" -# The amount of the transaction that Gluejar takes +# The amount of the transaction that Gluejar takes GLUEJAR_COMMISSION = 0.06 PREAPPROVAL_PERIOD = 365 # days to ask for in a preapproval PREAPPROVAL_PERIOD_AFTER_CAMPAIGN = 90 # if we ask for preapproval time after a campaign deadline @@ -333,9 +359,6 @@ # How many days we will try to collect on failed transactions until they are written off RECHARGE_WINDOW = 14 -GOODREADS_API_KEY = "" -GOODREADS_API_SECRET = "" - # unglue.it parameters UNGLUEIT_MINIMUM_TARGET = 500 # in US Dollars UNGLUEIT_MAXIMUM_TARGET = 10000000 # in US Dollars @@ -344,40 +367,39 @@ B2U_TERM = datetime.timedelta(days=5*365 +1 ) # 5 years? 
MAX_CC_DATE = datetime.date( 2099,12,31) -TEST_RUNNER = "djcelery.contrib.test_runner.CeleryTestSuiteRunner" -import djcelery -djcelery.setup_loader() - # Mailchimp archive JavaScript URL CAMPAIGN_ARCHIVE_JS = "http://us2.campaign-archive1.com/generate-js/?u=15472878790f9faa11317e085&fid=28161&show=10" +# use redis for production queue and results +BROKER_URL = "redis://127.0.0.1:6379/0" +CELERY_RESULT_BACKEND = "redis://127.0.0.1:6379/1" +CELERY_LOG_DIR = "" + # periodic tasks for celery -# start out with nothing scheduled -CELERYBEAT_SCHEDULE = {} from celery.schedules import crontab # define some periodic tasks SEND_TEST_EMAIL_JOB = { "task": "regluit.core.tasks.send_mail_task", - "schedule": crontab(hour=18, minute=20), - "args": ('hi there 18:20', 'testing 1, 2, 3', 'notices@gluejar.com', ['raymond.yee@gmail.com']) + "schedule": crontab(minute='0,5,10'), + "args": ('hi there 20,25,30', 'testing 1, 2, 3', 'notices@gluejar.com', ['unglueit@ebookfoundation.org']) } UPDATE_ACTIVE_CAMPAIGN_STATUSES = { "task": "regluit.core.tasks.update_active_campaign_status", - "schedule": crontab(hour=0, minute=1), + "schedule": crontab(day_of_month='*', hour=0, minute=1), "args": () } EBOOK_NOTIFICATIONS_JOB = { "task": "regluit.core.tasks.report_new_ebooks", - "schedule": crontab(hour=0, minute=30), - "args": () + "schedule": crontab(day_of_month='*', hour=0, minute=30), + "args": () } NOTIFY_ENDING_SOON_JOB = { "task": "regluit.core.tasks.notify_ending_soon", - "schedule": crontab(hour=1, minute=0), + "schedule": crontab(day_of_month='*', hour=1, minute=0), "args": () } @@ -396,13 +418,37 @@ NOTIFY_EXPIRING_ACCOUNTS = { "task": "regluit.payment.tasks.notify_expiring_accounts", "schedule": crontab(day_of_month=22, hour=0, minute=30), - "args": () + "args": () } NOTIFY_UNCLAIMED_GIFTS = { "task": "regluit.core.tasks.notify_unclaimed_gifts", - "schedule": crontab( hour=2, minute=15), - "args": () + "schedule": crontab(day_of_month='*', hour=2, minute=15), + "args": () +} + 
+SAVE_INFO_PAGE = { + "task": "regluit.frontend.tasks.save_info_page", + "schedule": crontab(day_of_month='*', hour=0, minute=40), + "args": () +} + +PERIODIC_CLEANUP = { + "task": "regluit.core.tasks.periodic_cleanup", + "schedule": crontab(day_of_month='1,11,21', hour=0, minute=35), + "args": () +} + +EMIT_NOTICES = { + "task": "regluit.core.tasks.emit_notifications", + "schedule": crontab(minute='1,11,21,31,41,51'), + "args": () +} + +FEATURE_NEW_WORK = { + "task": "regluit.core.tasks.feature_new_work", + "schedule": crontab(day_of_week=1, hour=9, minute=30), + "args": () } # by default, in common, we don't turn any of the celerybeat jobs on -- turn them on in the local settings file @@ -412,19 +458,19 @@ # amazon or paypal for now. PAYMENT_PROCESSOR = 'stripelib' - -# by default, we are not in maintenance mode -- set True in overriding settings files for maintenance mode -# http://pypi.python.org/pypi/django-maintenancemode/ -MAINTENANCE_MODE = False -# Sequence of URL path regexes to exclude from the maintenance mode. 
-MAINTENANCE_IGNORE_URLS = {} - +# allow application code to catch thumbnailing errors +THUMBNAIL_DEBUG = True +THUMBNAIL_FORCE_OVERWRITE = False +THUMBNAIL_REMOVE_URL_ARGS = False +THUMBNAIL_URL_TIMEOUT = 60 +# use redis +# THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore' # we should suppress Google Analytics outside of production SHOW_GOOGLE_ANALYTICS = False # to enable uploading to S3 and integration of django-storages + django-ckeditor -# some variables to be overriddden in more specific settings files -- e.g., prod.py, +# some variables to be overriddden in more specific settings files -- e.g., prod.py, CKEDITOR_ALLOW_NONIMAGE_FILES = False AWS_ACCESS_KEY_ID = '' @@ -468,19 +514,19 @@ TEST_PDF_URL = "https://github.com/Gluejar/flatland/raw/master/downloads/Flatland.pdf" FILE_UPLOAD_MAX_MEMORY_SIZE = 20971520 #20MB -QUESTIONNAIRE_USE_SESSION = False -QUESTIONNAIRE_DEBUG = True -QUESTIONNAIRE_ITEM_MODEL = 'core.Work' -QUESTIONNAIRE_SHOW_ITEM_RESULTS = False +USE_OPENLIBRARY = False # Selenium related -- set if Se tests run FIREFOX_PATH = '' CHROMEDRIVER_PATH = '' +GOOGLEBOT_UA = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" +CHROME_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15' + try: from .keys.common import * except ImportError: - print 'no real key file found, using dummy' + print('no real key file found, using dummy') from .dummy.common import * try: @@ -494,4 +540,7 @@ if AWS_SECRET_ACCESS_KEY: DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' else: - DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' + DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' + +# we wond't record downloads for an ebook if their more than this in a month +DOWNLOAD_LOGS_MAX = 499 \ No newline at end of file diff --git a/settings/dev.py b/settings/dev.py index db16a8e19..e1e805ace 100644 --- 
a/settings/dev.py +++ b/settings/dev.py @@ -59,19 +59,14 @@ #BASE_URL = 'http://0.0.0.0' BASE_URL_SECURE = 'https://0.0.0.0' -# use redis as queuing service -BROKER_TRANSPORT = "redis" -BROKER_HOST = "localhost" -BROKER_PORT = 6379 -BROKER_VHOST = "0" - # send celery log to Python logging -CELERYD_HIJACK_ROOT_LOGGER = False +WORKER_HIJACK_ROOT_LOGGER = False # a debug_toolbar setting INTERNAL_IPS = ('127.0.0.1',) -CELERYD_LOG_LEVEL = "INFO" +# start out with nothing scheduled +CELERYBEAT_SCHEDULE = {} # decide which of the period tasks to add to the schedule #CELERYBEAT_SCHEDULE['send_test_email'] = SEND_TEST_EMAIL_JOB diff --git a/settings/dummy/common.py b/settings/dummy/common.py index f60333b7d..f53ac0d76 100644 --- a/settings/dummy/common.py +++ b/settings/dummy/common.py @@ -8,6 +8,6 @@ GITHUB_PUBLIC_TOKEN = os.environ.get('GITHUB_PUBLIC_TOKEN', None) # 40 chars; null has lower limit MAILCHIMP_API_KEY = os.environ.get('MAILCHIMP_API_KEY', '-us2') # [32chars]-xx# MAILCHIMP_NEWS_ID = os.environ.get('MAILCHIMP_NEWS_ID', '0123456789') -MOBIGEN_PASSWORD = os.environ.get('MOBIGEN_PASSWORD', '012345678901234') -MOBIGEN_URL = os.environ.get('MOBIGEN_URL', '') # https://host/mobigen -MOBIGEN_USER_ID = os.environ.get('MOBIGEN_USER_ID', 'user') +STRIPE_PK = os.environ.get('STRIPE_PK', 'user') +STRIPE_SK = os.environ.get('STRIPE_SK', 'user') + diff --git a/settings/dummy/host.py b/settings/dummy/host.py index 9819dc7f9..f12b84d2a 100644 --- a/settings/dummy/host.py +++ b/settings/dummy/host.py @@ -10,30 +10,16 @@ # https://code.google.com/apis/console GOOGLE_BOOKS_API_KEY = os.environ.get("GOOGLE_BOOKS_API_KEY", "012345678901234567890123456789012345678") -# -GOODREADS_API_KEY = os.environ.get("GOODREADS_API_KEY", "01234567890123456789") -GOODREADS_API_SECRET = os.environ.get("GOODREADS_API_SECRET", None) #43 chars # Amazon SES # create with https://console.aws.amazon.com/ses/home?region=us-east-1#smtp-settings: EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", 
'01234567890123456789') EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", '01234567890123456789012345678901234567890123') -# twitter auth -# you'll need to create a new Twitter application to fill in these blanks -# https://dev.twitter.com/apps/new -SOCIAL_AUTH_TWITTER_KEY = os.environ.get("SOCIAL_AUTH_TWITTER_KEY", '0123456789012345678901234') -SOCIAL_AUTH_TWITTER_SECRET = os.environ.get("SOCIAL_AUTH_TWITTER_SECRET", '01234567890123456789012345678901234567890123456789') - # support@icontact.nl BOOXTREAM_API_KEY = os.environ.get("BOOXTREAM_API_KEY", None) # 30 chars BOOXTREAM_API_USER = os.environ.get("BOOXTREAM_API_USER", 'user') -# you'll need to create a new Facebook application to fill in these blanks -# https://developers.facebook.com/apps/ -SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY", '012345678901234') -SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET", '01234567890123456789012345678901') - # https://console.developers.google.com/apis/credentials/oauthclient/ # unglue.it (prod) SOCIAL_AUTH_GOOGLE_OAUTH2_KEY #2 SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get("_KEY", '012345678901-01234567890123456789012345678901.apps.googleusercontent.com') diff --git a/settings/just.py b/settings/just.py deleted file mode 100644 index 8f395b01a..000000000 --- a/settings/just.py +++ /dev/null @@ -1,129 +0,0 @@ -# coding=utf-8 -from .common import * - -ALLOWED_HOSTS = ['.unglue.it'] -DEBUG = False -TEMPLATES[0]['OPTIONS']['debug'] = DEBUG - -SITE_ID = 5 - -ADMINS = ( - ('Raymond Yee', 'rdhyee+ungluebugs@gluejar.com'), - ('Eric Hellman', 'eric@gluejar.com'), -) - -MANAGERS = ADMINS - -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.mysql', - 'NAME': 'unglueit', - 'USER': DATABASE_USER, - 'PASSWORD': DATABASE_PASSWORD, - 'HOST': DATABASE_HOST, - 'PORT': '', - 'TEST': { - 'CHARSET': 'utf8', - } - } -} - -TIME_ZONE = 'America/New_York' - -# settings for outbout email -# if you have a gmail account you can use 
your email address and password - -# Amazon SES - -EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend' -MAIL_USE_TLS = True -EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' -EMAIL_PORT = 465 -DEFAULT_FROM_EMAIL = 'notices@gluejar.com' - -# send celery log to Python logging -CELERYD_HIJACK_ROOT_LOGGER = False - -# Next step to try https -#BASE_URL = 'http://just.unglue.it' -BASE_URL_SECURE = 'https://just.unglue.it' -IPN_SECURE_URL = False - -# use redis for production queue -BROKER_TRANSPORT = "redis" -BROKER_HOST = "localhost" -BROKER_PORT = 6379 -BROKER_VHOST = "0" - -LOGGING = { - 'version': 1, - 'disable_existing_loggers': True, - 'formatters': { - 'brief': { - 'format': '%(asctime)s %(levelname)s %(name)s[%(funcName)s]: %(message)s', - }, - }, - 'handlers': { - 'mail_admins': { - 'level': 'ERROR', - 'class': 'django.utils.log.AdminEmailHandler' - }, - 'null': { - 'level': 'DEBUG', - 'class': 'logging.NullHandler', - }, - 'file': { - 'level': 'INFO', - 'class': 'logging.handlers.RotatingFileHandler', - 'filename': join('/var/log/regluit', 'unglue.it.log'), - 'maxBytes': 1024*1024*5, # 5 MB - 'backupCount': 5, - 'formatter': 'brief', - }, - }, - 'loggers': { - 'django.request': { - 'handlers': ['mail_admins'], - 'level': 'ERROR', - 'propagate': True, - }, - 'django.security.DisallowedHost': { - 'handlers': ['null'], - 'propagate': False, - }, - '': { - 'handlers': ['file'], - 'level': 'WARNING', - 'propagate': False, - }, - } -} - -STATIC_ROOT = '/var/www/static' -#CKEDITOR_UPLOAD_PATH = '/var/www/static/media/' -#CKEDITOR_UPLOAD_PREFIX = 'https://just.unglue.it/static/media/' - -IS_PREVIEW = False - -# decide which of the period tasks to add to the schedule -CELERYBEAT_SCHEDULE['send_test_email'] = SEND_TEST_EMAIL_JOB -CELERYBEAT_SCHEDULE['report_new_ebooks'] = EBOOK_NOTIFICATIONS_JOB - - -CELERYBEAT_SCHEDULE['update_account_statuses'] = UPDATE_ACCOUNT_STATUSES -CELERYBEAT_SCHEDULE['notify_expiring_accounts'] = NOTIFY_EXPIRING_ACCOUNTS 
-CELERYBEAT_SCHEDULE['refresh_acqs'] = REFRESH_ACQS_JOB - - -# set -- sandbox or production Amazon FPS? -AMAZON_FPS_HOST = "fps.sandbox.amazonaws.com" -#AMAZON_FPS_HOST = "fps.amazonaws.com" - -# local settings for maintenance mode -MAINTENANCE_MODE = False - -# if settings/local.py exists, import those settings -- allows for dynamic generation of parameters such as DATABASES -try: - from regluit.settings.local import * -except ImportError: - pass diff --git a/settings/please.py b/settings/please.py deleted file mode 100644 index d9837a498..000000000 --- a/settings/please.py +++ /dev/null @@ -1,118 +0,0 @@ -from regluit.settings.common import * - -ALLOWED_HOSTS = ['.unglue.it'] -DEBUG = False -TEMPLATES[0]['OPTIONS']['debug'] = DEBUG - -SITE_ID = 2 - -ADMINS = ( - ('Raymond Yee', 'rdhyee+ungluebugs@gluejar.com'), - ('Eric Hellman', 'eric@gluejar.com'), -) - -MANAGERS = ADMINS - -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.mysql', - 'NAME': 'regluit', - 'USER': DATABASE_USER, - 'PASSWORD': DATABASE_PASSWORD, - 'HOST': DATABASE_HOST, - 'PORT': '', - 'TEST': { - 'CHARSET': 'utf8', - } - } -} - - -TIME_ZONE = 'America/New_York' - -# settings for outbout email -# if you have a gmail account you can use your email address and password - -# Amazon SES -EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend' -MAIL_USE_TLS = True -EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' -EMAIL_PORT = 465 -DEFAULT_FROM_EMAIL = 'notices@gluejar.com' - -# send celery log to Python logging -CELERYD_HIJACK_ROOT_LOGGER = False - -# Next step to try https -#BASE_URL = 'http://please.unglueit.com' -BASE_URL_SECURE = 'https://please.unglueit.com' - -# use redis for production queue -BROKER_TRANSPORT = "redis" -BROKER_HOST = "localhost" -BROKER_PORT = 6379 -BROKER_VHOST = "0" - -LOGGING = { - 'version': 1, - 'disable_existing_loggers': True, - 'formatters': { - 'brief': { - 'format': '%(asctime)s %(levelname)s %(name)s[%(funcName)s]: %(message)s', - }, - }, - 'handlers': { - 
'mail_admins': { - 'level': 'ERROR', - 'class': 'django.utils.log.AdminEmailHandler' - }, - 'null': { - 'level': 'DEBUG', - 'class': 'logging.NullHandler', - }, - 'file': { - 'level': 'INFO', - 'class': 'logging.handlers.RotatingFileHandler', - 'filename': join('/var/log/regluit', 'unglue.it.log'), - 'maxBytes': 1024*1024*5, # 5 MB - 'backupCount': 5, - 'formatter': 'brief', - }, - }, - 'loggers': { - 'django.request': { - 'handlers': ['mail_admins'], - 'level': 'ERROR', - 'propagate': True, - }, - 'django.security.DisallowedHost': { - 'handlers': ['null'], - 'propagate': False, - }, - '': { - 'handlers': ['file'], - 'level': 'WARNING', - 'propagate': False, - }, - } -} - -STATIC_ROOT = '/var/www/static' -CKEDITOR_UPLOAD_PATH = '/var/www/static/media/' - -IS_PREVIEW = False - -# decide which of the period tasks to add to the schedule -#CELERYBEAT_SCHEDULE['send_test_email'] = SEND_TEST_EMAIL_JOB -CELERYBEAT_SCHEDULE['report_new_ebooks'] = EBOOK_NOTIFICATIONS_JOB - - -# local settings for maintenance mode -MAINTENANCE_MODE = False - - -# if settings/local.py exists, import those settings -- allows for dynamic generation of parameters such as DATABASES -try: - from regluit.settings.local import * -except ImportError: - pass diff --git a/settings/prod.py b/settings/prod.py index 06870e60d..35dd898c3 100644 --- a/settings/prod.py +++ b/settings/prod.py @@ -9,7 +9,6 @@ SITE_ID = 1 ADMINS = ( - ('Raymond Yee', 'rdhyee+ungluebugs@gluejar.com'), ('Eric Hellman', 'eric@gluejar.com'), ) @@ -25,6 +24,9 @@ 'PORT': '', 'TEST': { 'CHARSET': 'utf8', + }, + 'OPTIONS': { + 'init_command': 'SET max_execution_time=30000' # In milliseconds; requires MySQL 5.7 } } } @@ -37,26 +39,19 @@ # Amazon SES -EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend' MAIL_USE_TLS = True EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' EMAIL_PORT = 465 DEFAULT_FROM_EMAIL = 'notices@gluejar.com' # send celery log to Python logging -CELERYD_HIJACK_ROOT_LOGGER = False +WORKER_HIJACK_ROOT_LOGGER = False # 
Next step to try https #BASE_URL = 'http://unglue.it' BASE_URL_SECURE = 'https://unglue.it' IPN_SECURE_URL = False -# use redis for production queue -BROKER_TRANSPORT = "redis" -BROKER_HOST = "localhost" -BROKER_PORT = 6379 -BROKER_VHOST = "0" - LOGGING = { 'version': 1, 'disable_existing_loggers': True, @@ -105,8 +100,11 @@ #CKEDITOR_UPLOAD_PATH = '/var/www/static/media/' #CKEDITOR_UPLOAD_PREFIX = 'https://unglue.it/static/media/' +# start out with nothing scheduled +CELERYBEAT_SCHEDULE = {} + # decide which of the period tasks to add to the schedule -CELERYBEAT_SCHEDULE['send_test_email'] = SEND_TEST_EMAIL_JOB +# CELERYBEAT_SCHEDULE['send_test_email'] = SEND_TEST_EMAIL_JOB # update the statuses of campaigns CELERYBEAT_SCHEDULE['update_active_campaign_statuses'] = UPDATE_ACTIVE_CAMPAIGN_STATUSES CELERYBEAT_SCHEDULE['report_new_ebooks'] = EBOOK_NOTIFICATIONS_JOB diff --git a/settings/travis.py b/settings/travis.py deleted file mode 100644 index 477b4b416..000000000 --- a/settings/travis.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding=utf-8 -from .common import * - -DEBUG = True -TEMPLATES[0]['OPTIONS']['debug'] = DEBUG -IS_PREVIEW = False - -ADMINS = ( - ('Raymond Yee', 'rdhyee+ungluebugs@gluejar.com'), - ('Eric Hellman', 'eric@gluejar.com'), -) - -MANAGERS = ADMINS - -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.mysql', - 'NAME': 'regluit', - 'USER': 'root', - 'PASSWORD': '', - 'HOST': '', - 'PORT': '', - 'TEST_CHARSET': 'utf8', - } -} - -TIME_ZONE = 'America/New_York' - -# settings for outbout email -# if you have a gmail account you can use your email address and password - -EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend' -MAIL_USE_TLS = True -EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' -EMAIL_PORT = 465 -DEFAULT_FROM_EMAIL = 'notices@gluejar.com' - - -# formerly of settings/common.py to surface old vars -# TO DO: invalidate before we open source - -# for use with test google account only -GOOGLE_DISPLAY_NAME = 'Unglue.It' -REDIRECT_IS_HTTPS 
= False - -#BASE_URL = 'http://0.0.0.0/' -BASE_URL_SECURE = 'http://0.0.0.0/' - -BROKER_TRANSPORT = "redis" -BROKER_HOST = "localhost" -BROKER_PORT = 6379 -BROKER_VHOST = "0" - -# Amazon S3 access -AWS_STORAGE_BUCKET_NAME = 'unglueit-testfiles' - -SOCIAL_AUTH_TWITTER_KEY = '' -SOCIAL_AUTH_TWITTER_SECRET = '' -SOCIAL_AUTH_FACEBOOK_KEY = '' -SOCIAL_AUTH_FACEBOOK_SECRET = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' -GOOGLE_BOOKS_API_KEY = '' -TEST_INTEGRATION = False -LOCAL_TEST = True -DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' - diff --git a/setup.py b/setup.py deleted file mode 100644 index 1e225e383..000000000 --- a/setup.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from setuptools import find_packages, setup - -with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme: - README = readme.read() - -# allow setup.py to be run from any path -os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) - -setup( - name='regluit', - version='3.0.0', - description='Web application for Unglue.it', - author='Free Ebook Foundation', - author_email='info@ebookfoundation.org', - url='https://unglue.it', - packages=find_packages(exclude=[ - 'bookdata', - 'deploy' - 'logs', - 'selenium', - 'static', - 'vagrant', - ]), -) \ No newline at end of file diff --git a/start.sh b/start.sh deleted file mode 100755 index 90fab66e8..000000000 --- a/start.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -django-admin.py celeryd --loglevel=INFO & -django-admin.py celerybeat -l INFO & -django-admin.py runserver 0.0.0.0:8000 diff --git a/static/.well-known/acme-challenge/test b/static/.well-known/acme-challenge/test new file mode 100644 index 000000000..224e3ffb2 --- /dev/null +++ b/static/.well-known/acme-challenge/test @@ -0,0 +1 @@ +surprise! 
\ No newline at end of file diff --git a/static/css/font-awesome.min.css b/static/css/font-awesome.min.css deleted file mode 100644 index 24fcc04c4..000000000 --- a/static/css/font-awesome.min.css +++ /dev/null @@ -1,4 +0,0 @@ -/*! - * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.3.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;transform:translate(0, 0)}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{c
ontent:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content
:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hd
d-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\
f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.
fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f1
4d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f18
0"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-s
team-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-genderless:before,.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{co
ntent:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venu
s:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"} \ No newline at end of file diff --git a/static/css/reader/annotations.css b/static/css/reader/annotations.css deleted file mode 100644 index 7a77e668d..000000000 --- a/static/css/reader/annotations.css +++ /dev/null @@ -1,3 +0,0 @@ -.annotator-adder { - width: 80px; -} diff --git a/static/css/reader/main.css b/static/css/reader/main.css deleted file mode 100755 index 953a40b74..000000000 --- a/static/css/reader/main.css +++ /dev/null @@ -1,817 +0,0 @@ -@font-face { - font-family: 'fontello'; - src: url('../../fonts/fontello.eot?60518104'); - src: url('../../fonts/fontello.eot?60518104#iefix') format('embedded-opentype'), - url('../../fonts/fontello.woff?60518104') format('woff'), - url('../../fonts/fontello.ttf?60518104') format('truetype'), - url('../../fonts/fontello.svg?60518104#fontello') format('svg'); - font-weight: normal; - font-style: normal; -} - -body { - background: #4e4e4e; - overflow: hidden; -} - -#main { - /* height: 500px; */ - position: absolute; - width: 100%; - height: 100%; - right: 0; - /* left: 40px; */ -/* -webkit-transform: translate(40px, 0); - -moz-transform: translate(40px, 0); */ - - /* 
border-radius: 5px 0px 0px 5px; */ - border-radius: 5px; - background: #fff; - overflow: hidden; - -webkit-transition: -webkit-transform .4s, width .2s; - -moz-transition: -webkit-transform .4s, width .2s; - -ms-transition: -webkit-transform .4s, width .2s; - - -moz-box-shadow: inset 0 0 50px rgba(0,0,0,.1); - -webkit-box-shadow: inset 0 0 50px rgba(0,0,0,.1); - -ms-box-shadow: inset 0 0 50px rgba(0,0,0,.1); - box-shadow: inset 0 0 50px rgba(0,0,0,.1); -} - - -#titlebar { - height: 8%; - min-height: 20px; - padding: 10px; - /* margin: 0 50px 0 50px; */ - position: relative; - color: #4f4f4f; - font-weight: 100; - font-family: Georgia, "Times New Roman", Times, serif; - opacity: .5; - text-align: center; - -webkit-transition: opacity .5s; - -moz-transition: opacity .5s; - -ms-transition: opacity .5s; - z-index: 10; -} - -#titlebar:hover { - opacity: 1; -} - -#titlebar a { - width: 18px; - height: 19px; - line-height: 20px; - overflow: hidden; - display: inline-block; - opacity: .5; - padding: 4px; - border-radius: 4px; -} - -#titlebar a::before { - visibility: visible; -} - -#titlebar a:hover { - opacity: .8; - border: 1px rgba(0,0,0,.2) solid; - padding: 3px; -} - -#titlebar a:active { - opacity: 1; - color: rgba(0,0,0,.6); - /* margin: 1px -1px -1px 1px; */ - -moz-box-shadow: inset 0 0 6px rgba(155,155,155,.8); - -webkit-box-shadow: inset 0 0 6px rgba(155,155,155,.8); - -ms-box-shadow: inset 0 0 6px rgba(155,155,155,.8); - box-shadow: inset 0 0 6px rgba(155,155,155,.8); -} - -#book-title { - font-weight: 600; -} - -#title-seperator { - display: none; -} - -#viewer { - width: 80%; - height: 80%; - /* margin-left: 10%; */ - margin: 0 auto; - max-width: 1250px; - z-index: 2; - position: relative; - overflow: hidden; -} - -#viewer iframe { - border: none; -} - -#prev { - left: 40px; -} - -#next { - right: 40px; -} - -.arrow { - position: absolute; - top: 50%; - margin-top: -32px; - font-size: 64px; - color: #E2E2E2; - font-family: arial, sans-serif; - font-weight: 
bold; - cursor: pointer; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -.arrow:hover { - color: #777; -} - -.arrow:active, -.arrow.active { - color: #000; -} - -#sidebar { - background: #6b6b6b; - position: absolute; - /* left: -260px; */ - /* -webkit-transform: translate(-260px, 0); - -moz-transform: translate(-260px, 0); */ - top: 0; - min-width: 300px; - width: 25%; - height: 100%; - -webkit-transition: -webkit-transform .5s; - -moz-transition: -moz-transform .5s; - -ms-transition: -moz-transform .5s; - - overflow: hidden; -} - -#sidebar.open { - /* left: 0; */ - /* -webkit-transform: translate(0, 0); - -moz-transform: translate(0, 0); */ -} - -#main.closed { - /* left: 300px; */ - -webkit-transform: translate(300px, 0); - -moz-transform: translate(300px, 0); - -ms-transform: translate(300px, 0); -} - -#main.single { - width: 75%; -} - -#main.single #viewer { - /* width: 60%; - margin-left: 20%; */ -} - -#panels { - background: #4e4e4e; - position: absolute; - left: 0; - top: 0; - width: 100%; - padding: 13px 0; - height: 14px; - -moz-box-shadow: 0px 1px 3px rgba(0,0,0,.6); - -webkit-box-shadow: 0px 1px 3px rgba(0,0,0,.6); - -ms-box-shadow: 0px 1px 3px rgba(0,0,0,.6); - box-shadow: 0px 1px 3px rgba(0,0,0,.6); -} - -#opener { - /* padding: 10px 10px; */ - float: left; -} - -/* #opener #slider { - width: 25px; -} */ - -#metainfo { - display: inline-block; - text-align: center; - max-width: 80%; -} - -#title-controls { - float: right; -} - -#panels a { - visibility: hidden; - width: 18px; - height: 20px; - overflow: hidden; - display: inline-block; - color: #ccc; - margin-left: 6px; -} - -#panels a::before { - visibility: visible; -} - -#panels a:hover { - color: #AAA; -} - -#panels a:active { - color: #AAA; - margin: 1px 0 -1px 6px; -} - -#panels a.active, -#panels a.active:hover { - color: #AAA; -} - -#searchBox { - width: 165px; - float: left; - margin-left: 10px; - 
margin-top: -1px; - /* - border-radius: 5px; - background: #9b9b9b; - float: left; - margin-left: 5px; - margin-top: -5px; - padding: 3px 10px; - color: #000; - border: none; - outline: none; */ - -} - -input::-webkit-input-placeholder { - color: #454545; -} -input:-moz-placeholder { - color: #454545; -} -input:-ms-placeholder { - color: #454545; -} - -#divider { - position: absolute; - width: 1px; - border-right: 1px #000 solid; - height: 80%; - z-index: 1; - left: 50%; - margin-left: -1px; - top: 10%; - opacity: .15; - box-shadow: -2px 0 15px rgba(0, 0, 0, 1); - display: none; -} - -#divider.show { - display: block; -} - -#loader { - position: absolute; - z-index: 10; - left: 50%; - top: 50%; - margin: -33px 0 0 -33px; -} - -#tocView, -#bookmarksView { - overflow-x: hidden; - overflow-y: hidden; - min-width: 300px; - width: 25%; - height: 100%; - visibility: hidden; - -webkit-transition: visibility 0 ease .5s; - -moz-transition: visibility 0 ease .5s; - -ms-transition: visibility 0 ease .5s; -} - - - -#sidebar.open #tocView, -#sidebar.open #bookmarksView { - overflow-y: auto; - visibility: visible; - -webkit-transition: visibility 0 ease 0; - -moz-transition: visibility 0 ease 0; - -ms-transition: visibility 0 ease 0; -} - -#sidebar.open #tocView { - display: block; -} - -#tocView > ul, -#bookmarksView > ul { - margin-top: 15px; - margin-bottom: 50px; - padding-left: 20px; - display: block; -} - -#tocView li, -#bookmarksView li { - margin-bottom:10px; - width: 225px; - font-family: Georgia, "Times New Roman", Times, serif; - list-style: none; - text-transform: capitalize; -} - -#tocView li:active, -#tocView li.currentChapter -{ - list-style: none; -} - -.list_item a { - color: #AAA; - text-decoration: none; -} - -.list_item a.chapter { - font-size: 1em; -} - -.list_item a.section { - font-size: .8em; -} - -.list_item.currentChapter > a, -.list_item a:hover { - color: #f1f1f1 -} - -/* #tocView li.openChapter > a, */ -.list_item a:hover { - color: #E2E2E2; -} - 
-.list_item ul { - padding-left:10px; - margin-top: 8px; - display: none; -} - -.list_item.currentChapter > ul, -.list_item.openChapter > ul { - display: block; -} - -#tocView.hidden { - display: none; -} - -.toc_toggle { - display: inline-block; - width: 14px; - cursor: pointer; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -.toc_toggle:before { - content: '▸'; - color: #fff; - margin-right: -4px; -} - -.currentChapter > .toc_toggle:before, -.openChapter > .toc_toggle:before { - content: '▾'; -} - -.view { - width: 300px; - height: 100%; - display: none; - padding-top: 50px; - overflow-y: auto; -} - -#searchResults { - margin-bottom: 50px; - padding-left: 20px; - display: block; -} - -#searchResults li { - margin-bottom:10px; - width: 225px; - font-family: Georgia, "Times New Roman", Times, serif; - list-style: none; -} - -#searchResults a { - color: #AAA; - text-decoration: none; -} - -#searchResults p { - text-decoration: none; - font-size: 12px; - line-height: 16px; -} - -#searchResults p .match { - background: #ccc; - color: #000; -} - -#searchResults li > p { - color: #AAA; -} - -#searchResults li a:hover { - color: #E2E2E2; -} - -#searchView.shown { - display: block; - overflow-y: scroll; -} - -#notes { - padding: 0 0 0 34px; -} - -#notes li { - color: #eee; - font-size: 12px; - width: 240px; - border-top: 1px #fff solid; - padding-top: 6px; - margin-bottom: 6px; -} - -#notes li a { - color: #fff; - display: inline-block; - margin-left: 6px; -} - -#notes li a:hover { - text-decoration: underline; -} - -#notes li img { - max-width: 240px; -} - -#note-text { - display: block; - width: 260px; - height: 80px; - margin: 0 auto; - padding: 5px; - border-radius: 5px; -} - -#note-text[disabled], #note-text[disabled="disabled"]{ - opacity: .5; -} - -#note-anchor { - margin-left: 218px; - margin-top: 5px; -} - -#settingsPanel { - display:none; -} - -#settingsPanel h3 { - color:#f1f1f1; - font-family:Georgia, 
"Times New Roman", Times, serif; - margin-bottom:10px; -} - -#settingsPanel ul { - margin-top:60px; - list-style-type:none; -} - -#settingsPanel li { - font-size:1em; - color:#f1f1f1; -} - -#settingsPanel .xsmall { font-size:x-small; } -#settingsPanel .small { font-size:small; } -#settingsPanel .medium { font-size:medium; } -#settingsPanel .large { font-size:large; } -#settingsPanel .xlarge { font-size:x-large; } - -.highlight { background-color: yellow } - -.modal { - position: fixed; - top: 50%; - left: 50%; - width: 50%; - width: 630px; - - height: auto; - z-index: 2000; - visibility: hidden; - margin-left: -320px; - margin-top: -160px; - -} - -.overlay { - position: fixed; - width: 100%; - height: 100%; - visibility: hidden; - top: 0; - left: 0; - z-index: 1000; - opacity: 0; - background: rgba(255,255,255,0.8); - -webkit-transition: all 0.3s; - -moz-transition: all 0.3s; - -ms-transition: all 0.3s; - transition: all 0.3s; -} - -.md-show { - visibility: visible; -} - -.md-show ~ .overlay { - opacity: 1; - visibility: visible; -} - -/* Content styles */ -.md-content { - color: #fff; - background: #6b6b6b; - position: relative; - border-radius: 3px; - margin: 0 auto; - height: 320px; -} - -.md-content h3 { - margin: 0; - padding: 6px; - text-align: center; - font-size: 22px; - font-weight: 300; - opacity: 0.8; - background: rgba(0,0,0,0.1); - border-radius: 3px 3px 0 0; -} - -.md-content > div { - padding: 15px 40px 30px; - margin: 0; - font-weight: 300; - font-size: 14px; -} - -.md-content > div p { - margin: 0; - padding: 10px 0; -} - -.md-content > div ul { - margin: 0; - padding: 0 0 30px 20px; -} - -.md-content > div ul li { - padding: 5px 0; -} - -.md-content button { - display: block; - margin: 0 auto; - font-size: 0.8em; -} - -/* Effect 1: Fade in and scale up */ -.md-effect-1 .md-content { - -webkit-transform: scale(0.7); - -moz-transform: scale(0.7); - -ms-transform: scale(0.7); - transform: scale(0.7); - opacity: 0; - -webkit-transition: all 0.3s; - 
-moz-transition: all 0.3s; - -ms-transition: all 0.3s; - transition: all 0.3s; -} - -.md-show.md-effect-1 .md-content { - -webkit-transform: scale(1); - -moz-transform: scale(1); - -ms-transform: scale(1); - transform: scale(1); - opacity: 1; -} - -.md-content > .closer { - font-size: 18px; - position: absolute; - right: 0; - top: 0; - font-size: 24px; - padding: 4px; -} - -@media only screen and (max-width: 1040px) { - #viewer{ - width: 50%; - margin-left: 25%; - } - - #divider, - #divider.show { - display: none; - } -} - -@media only screen and (max-width: 900px) { - #viewer{ - width: 60%; - margin-left: 20%; - } - - #prev { - left: 20px; - } - - #next { - right: 20px; - } -} - -@media only screen and (max-width: 550px) { - #viewer{ - width: 80%; - margin-left: 10%; - } - - #prev { - left: 0; - } - - #next { - right: 0; - } - - .arrow { - height: 100%; - top: 45px; - width: 10%; - text-indent: -10000px; - } - - #main { - -webkit-transform: translate(0, 0); - -moz-transform: translate(0, 0); - -ms-transform: translate(0, 0); - -webkit-transition: -webkit-transform .3s; - -moz-transition: -moz-transform .3s; - -ms-transition: -moz-transform .3s; - } - - #main.closed { - -webkit-transform: translate(260px, 0); - -moz-transform: translate(260px, 0); - -ms-transform: translate(260px, 0); - } - - #titlebar { - /* font-size: 16px; */ - /* margin: 0 50px 0 50px; */ - } - - #metainfo { - font-size: 10px; - } - - #tocView { - width: 260px; - } - - #tocView li { - font-size: 12px; - } - - #tocView > ul{ - padding-left: 10px; - } -} - - -/* For iPad portrait layouts only */ -@media only screen and (min-device-width: 481px) and (max-device-width: 1024px) and (orientation: portrait) { - #viewer iframe { - width: 460px; - height: 740px; - } -} - /*For iPad landscape layouts only */ -@media only screen and (min-device-width: 481px) and (max-device-width: 1024px) and (orientation: landscape) { - #viewer iframe { - width: 460px; - height: 415px; - } -} -/* For iPhone portrait 
layouts only */ -@media only screen and (max-device-width: 480px) and (orientation: portrait) { - #viewer { - width: 256px; - height: 432px; - } - #viewer iframe { - width: 256px; - height: 432px; - } -} -/* For iPhone landscape layouts only */ -@media only screen and (max-device-width: 480px) and (orientation: landscape) { - #viewer iframe { - width: 256px; - height: 124px; - } -} - -[class^="icon-"]:before, [class*=" icon-"]:before { - font-family: "fontello"; - font-style: normal; - font-weight: normal; - speak: none; - - display: inline-block; - text-decoration: inherit; - width: 1em; - margin-right: .2em; - text-align: center; - /* opacity: .8; */ - - /* For safety - reset parent styles, that can break glyph codes*/ - font-variant: normal; - text-transform: none; - - /* you can be more comfortable with increased icons size */ - font-size: 112%; -} - - -.icon-search:before { content: '\e807'; } /* '' */ -.icon-resize-full-1:before { content: '\e804'; } /* '' */ -.icon-cancel-circled2:before { content: '\e80f'; } /* '' */ -.icon-link:before { content: '\e80d'; } /* '' */ -.icon-bookmark:before { content: '\e805'; } /* '' */ -.icon-bookmark-empty:before { content: '\e806'; } /* '' */ -.icon-download-cloud:before { content: '\e811'; } /* '' */ -.icon-edit:before { content: '\e814'; } /* '' */ -.icon-menu:before { content: '\e802'; } /* '' */ -.icon-cog:before { content: '\e813'; } /* '' */ -.icon-resize-full:before { content: '\e812'; } /* '' */ -.icon-cancel-circled:before { content: '\e80e'; } /* '' */ -.icon-up-dir:before { content: '\e80c'; } /* '' */ -.icon-right-dir:before { content: '\e80b'; } /* '' */ -.icon-angle-right:before { content: '\e809'; } /* '' */ -.icon-angle-down:before { content: '\e80a'; } /* '' */ -.icon-right:before { content: '\e815'; } /* '' */ -.icon-list-1:before { content: '\e803'; } /* '' */ -.icon-list-numbered:before { content: '\e801'; } /* '' */ -.icon-columns:before { content: '\e810'; } /* '' */ 
-.icon-list:before { content: '\e800'; } /* '' */ -.icon-resize-small:before { content: '\e808'; } /* '' */ diff --git a/static/css/reader/normalize.css b/static/css/reader/normalize.css deleted file mode 100755 index c3e014d95..000000000 --- a/static/css/reader/normalize.css +++ /dev/null @@ -1,505 +0,0 @@ -/*! normalize.css v1.0.1 | MIT License | git.io/normalize */ - -/* ========================================================================== - HTML5 display definitions - ========================================================================== */ - -/* - * Corrects `block` display not defined in IE 6/7/8/9 and Firefox 3. - */ - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -nav, -section, -summary { - display: block; -} - -/* - * Corrects `inline-block` display not defined in IE 6/7/8/9 and Firefox 3. - */ - -audio, -canvas, -video { - display: inline-block; - *display: inline; - *zoom: 1; -} - -/* - * Prevents modern browsers from displaying `audio` without controls. - * Remove excess height in iOS 5 devices. - */ - -audio:not([controls]) { - display: none; - height: 0; -} - -/* - * Addresses styling for `hidden` attribute not present in IE 7/8/9, Firefox 3, - * and Safari 4. - * Known issue: no IE 6 support. - */ - -[hidden] { - display: none; -} - -/* ========================================================================== - Base - ========================================================================== */ - -/* - * 1. Corrects text resizing oddly in IE 6/7 when body `font-size` is set using - * `em` units. - * 2. Prevents iOS text size adjust after orientation change, without disabling - * user zoom. - */ - -html { - font-size: 100%; /* 1 */ - -webkit-text-size-adjust: 100%; /* 2 */ - -ms-text-size-adjust: 100%; /* 2 */ -} - -/* - * Addresses `font-family` inconsistency between `textarea` and other form - * elements. 
- */ - -html, -button, -input, -select, -textarea { - font-family: sans-serif; -} - -/* - * Addresses margins handled incorrectly in IE 6/7. - */ - -body { - margin: 0; -} - -/* ========================================================================== - Links - ========================================================================== */ - -/* - * Addresses `outline` inconsistency between Chrome and other browsers. - */ - -a:focus { - outline: thin dotted; -} - -/* - * Improves readability when focused and also mouse hovered in all browsers. - */ - -a:active, -a:hover { - outline: 0; -} - -/* ========================================================================== - Typography - ========================================================================== */ - -/* - * Addresses font sizes and margins set differently in IE 6/7. - * Addresses font sizes within `section` and `article` in Firefox 4+, Safari 5, - * and Chrome. - */ - -h1 { - font-size: 2em; - margin: 0.67em 0; -} - -h2 { - font-size: 1.5em; - margin: 0.83em 0; -} - -h3 { - font-size: 1.17em; - margin: 1em 0; -} - -h4 { - font-size: 1em; - margin: 1.33em 0; -} - -h5 { - font-size: 0.83em; - margin: 1.67em 0; -} - -h6 { - font-size: 0.75em; - margin: 2.33em 0; -} - -/* - * Addresses styling not present in IE 7/8/9, Safari 5, and Chrome. - */ - -abbr[title] { - border-bottom: 1px dotted; -} - -/* - * Addresses style set to `bolder` in Firefox 3+, Safari 4/5, and Chrome. - */ - -b, -strong { - font-weight: bold; -} - -blockquote { - margin: 1em 40px; -} - -/* - * Addresses styling not present in Safari 5 and Chrome. - */ - -dfn { - font-style: italic; -} - -/* - * Addresses styling not present in IE 6/7/8/9. - */ - -mark { - background: #ff0; - color: #000; -} - -/* - * Addresses margins set differently in IE 6/7. - */ - -p, -pre { - margin: 1em 0; -} - -/* - * Corrects font family set oddly in IE 6, Safari 4/5, and Chrome. 
- */ - -code, -kbd, -pre, -samp { - font-family: monospace, serif; - _font-family: 'courier new', monospace; - font-size: 1em; -} - -/* - * Improves readability of pre-formatted text in all browsers. - */ - -pre { - white-space: pre; - white-space: pre-wrap; - word-wrap: break-word; -} - -/* - * Addresses CSS quotes not supported in IE 6/7. - */ - -q { - quotes: none; -} - -/* - * Addresses `quotes` property not supported in Safari 4. - */ - -q:before, -q:after { - content: ''; - content: none; -} - -/* - * Addresses inconsistent and variable font size in all browsers. - */ - -small { - font-size: 80%; -} - -/* - * Prevents `sub` and `sup` affecting `line-height` in all browsers. - */ - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -/* ========================================================================== - Lists - ========================================================================== */ - -/* - * Addresses margins set differently in IE 6/7. - */ - -dl, -menu, -ol, -ul { - margin: 1em 0; -} - -dd { - margin: 0 0 0 40px; -} - -/* - * Addresses paddings set differently in IE 6/7. - */ - -menu, -ol, -ul { - padding: 0 0 0 40px; -} - -/* - * Corrects list images handled incorrectly in IE 7. - */ - -nav ul, -nav ol { - list-style: none; - list-style-image: none; -} - -/* ========================================================================== - Embedded content - ========================================================================== */ - -/* - * 1. Removes border when inside `a` element in IE 6/7/8/9 and Firefox 3. - * 2. Improves image quality when scaled in IE 7. - */ - -img { - border: 0; /* 1 */ - -ms-interpolation-mode: bicubic; /* 2 */ -} - -/* - * Corrects overflow displayed oddly in IE 9. 
- */ - -svg:not(:root) { - overflow: hidden; -} - -/* ========================================================================== - Figures - ========================================================================== */ - -/* - * Addresses margin not present in IE 6/7/8/9, Safari 5, and Opera 11. - */ - -figure { - margin: 0; -} - -/* ========================================================================== - Forms - ========================================================================== */ - -/* - * Corrects margin displayed oddly in IE 6/7. - */ - -form { - margin: 0; -} - -/* - * Define consistent border, margin, and padding. - */ - -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} - -/* - * 1. Corrects color not being inherited in IE 6/7/8/9. - * 2. Corrects text not wrapping in Firefox 3. - * 3. Corrects alignment displayed oddly in IE 6/7. - */ - -legend { - border: 0; /* 1 */ - padding: 0; - white-space: normal; /* 2 */ - *margin-left: -7px; /* 3 */ -} - -/* - * 1. Corrects font size not being inherited in all browsers. - * 2. Addresses margins set differently in IE 6/7, Firefox 3+, Safari 5, - * and Chrome. - * 3. Improves appearance and consistency in all browsers. - */ - -button, -input, -select, -textarea { - font-size: 100%; /* 1 */ - margin: 0; /* 2 */ - vertical-align: baseline; /* 3 */ - *vertical-align: middle; /* 3 */ -} - -/* - * Addresses Firefox 3+ setting `line-height` on `input` using `!important` in - * the UA stylesheet. - */ - -button, -input { - line-height: normal; -} - -/* - * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` - * and `video` controls. - * 2. Corrects inability to style clickable `input` types in iOS. - * 3. Improves usability and consistency of cursor style between image-type - * `input` and others. - * 4. Removes inner spacing in IE 7 without affecting normal text inputs. - * Known issue: inner spacing remains in IE 6. 
- */ - -button, -html input[type="button"], /* 1 */ -input[type="reset"], -input[type="submit"] { - -webkit-appearance: button; /* 2 */ - cursor: pointer; /* 3 */ - *overflow: visible; /* 4 */ -} - -/* - * Re-set default cursor for disabled elements. - */ - -button[disabled], -input[disabled] { - cursor: default; -} - -/* - * 1. Addresses box sizing set to content-box in IE 8/9. - * 2. Removes excess padding in IE 8/9. - * 3. Removes excess padding in IE 7. - * Known issue: excess padding remains in IE 6. - */ - -input[type="checkbox"], -input[type="radio"] { - box-sizing: border-box; /* 1 */ - padding: 0; /* 2 */ - *height: 13px; /* 3 */ - *width: 13px; /* 3 */ -} - -/* - * 1. Addresses `appearance` set to `searchfield` in Safari 5 and Chrome. - * 2. Addresses `box-sizing` set to `border-box` in Safari 5 and Chrome - * (include `-moz` to future-proof). - */ -/* -input[type="search"] { - -webkit-appearance: textfield; - -moz-box-sizing: content-box; - -webkit-box-sizing: content-box; - box-sizing: content-box; -} -*/ - -/* - * Removes inner padding and search cancel button in Safari 5 and Chrome - * on OS X. - */ - -/* input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} */ - -/* - * Removes inner padding and border in Firefox 3+. - */ - -button::-moz-focus-inner, -input::-moz-focus-inner { - border: 0; - padding: 0; -} - -/* - * 1. Removes default vertical scrollbar in IE 6/7/8/9. - * 2. Improves readability and alignment in all browsers. - */ - -textarea { - overflow: auto; /* 1 */ - vertical-align: top; /* 2 */ -} - -/* ========================================================================== - Tables - ========================================================================== */ - -/* - * Remove most spacing between table cells. 
- */ - -table { - border-collapse: collapse; - border-spacing: 0; -} diff --git a/static/css/reader/popup.css b/static/css/reader/popup.css deleted file mode 100644 index c41aac716..000000000 --- a/static/css/reader/popup.css +++ /dev/null @@ -1,96 +0,0 @@ -/* http://davidwalsh.name/css-tooltips */ -/* base CSS element */ -.popup { - background: #eee; - border: 1px solid #ccc; - padding: 10px; - border-radius: 8px; - box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); - position: fixed; - max-width: 300px; - font-size: 12px; - - display: none; - margin-left: 2px; - - margin-top: 30px; -} - -.popup.above { - margin-top: -10px; -} - -.popup.left { - margin-left: -20px; -} - -.popup.right { - margin-left: 40px; -} - -.pop_content { - max-height: 225px; - overflow-y: auto; -} - -.pop_content > p { - margin-top: 0; -} - -/* below */ -.popup:before { - position: absolute; - display: inline-block; - border-bottom: 10px solid #eee; - border-right: 10px solid transparent; - border-left: 10px solid transparent; - border-bottom-color: rgba(0, 0, 0, 0.2); - left: 50%; - top: -10px; - margin-left: -6px; - content: ''; -} - -.popup:after { - position: absolute; - display: inline-block; - border-bottom: 9px solid #eee; - border-right: 9px solid transparent; - border-left: 9px solid transparent; - left: 50%; - top: -9px; - margin-left: -5px; - content: ''; -} - -/* above */ -.popup.above:before { - border-bottom: none; - border-top: 10px solid #eee; - border-top-color: rgba(0, 0, 0, 0.2); - top: 100%; -} - -.popup.above:after { - border-bottom: none; - border-top: 9px solid #eee; - top: 100%; -} - -.popup.left:before, -.popup.left:after -{ - left: 20px; -} - -.popup.right:before, -.popup.right:after -{ - left: auto; - right: 20px; -} - - -.popup.show, .popup.on { - display: block; -} \ No newline at end of file diff --git a/static/css/ui-lightness/jquery-ui-1.11.4.min.css b/static/css/ui-lightness/jquery-ui-1.11.4.min.css new file mode 100644 index 000000000..efccc4767 --- /dev/null +++ 
b/static/css/ui-lightness/jquery-ui-1.11.4.min.css @@ -0,0 +1,7 @@ +/*! jQuery UI - v1.11.4 - 2015-03-11 +* http://jqueryui.com +* Includes: core.css, accordion.css, autocomplete.css, button.css, datepicker.css, dialog.css, draggable.css, menu.css, progressbar.css, resizable.css, selectable.css, selectmenu.css, slider.css, sortable.css, spinner.css, tabs.css, tooltip.css, theme.css +* To view and modify this theme, visit http://jqueryui.com/themeroller/?ffDefault=Trebuchet%20MS%2CTahoma%2CVerdana%2CArial%2Csans-serif&fwDefault=bold&fsDefault=1.1em&cornerRadius=4px&bgColorHeader=f6a828&bgTextureHeader=gloss_wave&bgImgOpacityHeader=35&borderColorHeader=e78f08&fcHeader=ffffff&iconColorHeader=ffffff&bgColorContent=eeeeee&bgTextureContent=highlight_soft&bgImgOpacityContent=100&borderColorContent=dddddd&fcContent=333333&iconColorContent=222222&bgColorDefault=f6f6f6&bgTextureDefault=glass&bgImgOpacityDefault=100&borderColorDefault=cccccc&fcDefault=1c94c4&iconColorDefault=ef8c08&bgColorHover=fdf5ce&bgTextureHover=glass&bgImgOpacityHover=100&borderColorHover=fbcb09&fcHover=c77405&iconColorHover=ef8c08&bgColorActive=ffffff&bgTextureActive=glass&bgImgOpacityActive=65&borderColorActive=fbd850&fcActive=eb8f00&iconColorActive=ef8c08&bgColorHighlight=ffe45c&bgTextureHighlight=highlight_soft&bgImgOpacityHighlight=75&borderColorHighlight=fed22f&fcHighlight=363636&iconColorHighlight=228ef1&bgColorError=b81900&bgTextureError=diagonals_thick&bgImgOpacityError=18&borderColorError=cd0a0a&fcError=ffffff&iconColorError=ffd27a&bgColorOverlay=666666&bgTextureOverlay=diagonals_thick&bgImgOpacityOverlay=20&opacityOverlay=50&bgColorShadow=000000&bgTextureShadow=flat&bgImgOpacityShadow=10&opacityShadow=20&thicknessShadow=5px&offsetTopShadow=-5px&offsetLeftShadow=-5px&cornerRadiusShadow=5px +* Copyright 2015 jQuery Foundation and other contributors; Licensed MIT */ + +.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 
0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-clearfix{min-height:0}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important}.ui-icon{display:block;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-accordion .ui-accordion-header{display:block;cursor:pointer;position:relative;margin:2px 0 0 0;padding:.5em .5em .5em .7em;min-height:0;font-size:100%}.ui-accordion .ui-accordion-icons{padding-left:2.2em}.ui-accordion .ui-accordion-icons .ui-accordion-icons{padding-left:2.2em}.ui-accordion .ui-accordion-header .ui-accordion-header-icon{position:absolute;left:.5em;top:50%;margin-top:-8px}.ui-accordion .ui-accordion-content{padding:1em 2.2em;border-top:0;overflow:auto}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-button{display:inline-block;position:relative;padding:0;line-height:normal;margin-right:.1em;cursor:pointer;vertical-align:middle;text-align:center;overflow:visible}.ui-button,.ui-button:link,.ui-button:visited,.ui-button:hover,.ui-button:active{text-decoration:none}.ui-button-icon-only{width:2.2em}button.ui-button-icon-only{width:2.4em}.ui-button-icons-only{width:3.4em}button.ui-button-icons-only{width:3.7em}.ui-button .ui-button-text{display:block;line-height:normal}.ui-button-text-only .ui-button-text{padding:.4em 1em}.ui-button-icon-only .ui-button-text,.ui-button-icons-only .ui-button-text{padding:.4em;text-indent:-9999999px}.ui-button-text-icon-primary .ui-button-text,.ui-button-text-icons .ui-button-text{padding:.4em 1em .4em 
2.1em}.ui-button-text-icon-secondary .ui-button-text,.ui-button-text-icons .ui-button-text{padding:.4em 2.1em .4em 1em}.ui-button-text-icons .ui-button-text{padding-left:2.1em;padding-right:2.1em}input.ui-button{padding:.4em 1em}.ui-button-icon-only .ui-icon,.ui-button-text-icon-primary .ui-icon,.ui-button-text-icon-secondary .ui-icon,.ui-button-text-icons .ui-icon,.ui-button-icons-only .ui-icon{position:absolute;top:50%;margin-top:-8px}.ui-button-icon-only .ui-icon{left:50%;margin-left:-8px}.ui-button-text-icon-primary .ui-button-icon-primary,.ui-button-text-icons .ui-button-icon-primary,.ui-button-icons-only .ui-button-icon-primary{left:.5em}.ui-button-text-icon-secondary .ui-button-icon-secondary,.ui-button-text-icons .ui-button-icon-secondary,.ui-button-icons-only .ui-button-icon-secondary{right:.5em}.ui-buttonset{margin-right:7px}.ui-buttonset .ui-button{margin-left:0;margin-right:-.3em}input.ui-button::-moz-focus-inner,button.ui-button::-moz-focus-inner{border:0;padding:0}.ui-datepicker{width:17em;padding:.2em .2em 0;display:none}.ui-datepicker .ui-datepicker-header{position:relative;padding:.2em 0}.ui-datepicker .ui-datepicker-prev,.ui-datepicker .ui-datepicker-next{position:absolute;top:2px;width:1.8em;height:1.8em}.ui-datepicker .ui-datepicker-prev-hover,.ui-datepicker .ui-datepicker-next-hover{top:1px}.ui-datepicker .ui-datepicker-prev{left:2px}.ui-datepicker .ui-datepicker-next{right:2px}.ui-datepicker .ui-datepicker-prev-hover{left:1px}.ui-datepicker .ui-datepicker-next-hover{right:1px}.ui-datepicker .ui-datepicker-prev span,.ui-datepicker .ui-datepicker-next span{display:block;position:absolute;left:50%;margin-left:-8px;top:50%;margin-top:-8px}.ui-datepicker .ui-datepicker-title{margin:0 2.3em;line-height:1.8em;text-align:center}.ui-datepicker .ui-datepicker-title select{font-size:1em;margin:1px 0}.ui-datepicker select.ui-datepicker-month,.ui-datepicker select.ui-datepicker-year{width:45%}.ui-datepicker 
table{width:100%;font-size:.9em;border-collapse:collapse;margin:0 0 .4em}.ui-datepicker th{padding:.7em .3em;text-align:center;font-weight:bold;border:0}.ui-datepicker td{border:0;padding:1px}.ui-datepicker td span,.ui-datepicker td a{display:block;padding:.2em;text-align:right;text-decoration:none}.ui-datepicker .ui-datepicker-buttonpane{background-image:none;margin:.7em 0 0 0;padding:0 .2em;border-left:0;border-right:0;border-bottom:0}.ui-datepicker .ui-datepicker-buttonpane button{float:right;margin:.5em .2em .4em;cursor:pointer;padding:.2em .6em .3em .6em;width:auto;overflow:visible}.ui-datepicker .ui-datepicker-buttonpane button.ui-datepicker-current{float:left}.ui-datepicker.ui-datepicker-multi{width:auto}.ui-datepicker-multi .ui-datepicker-group{float:left}.ui-datepicker-multi .ui-datepicker-group table{width:95%;margin:0 auto .4em}.ui-datepicker-multi-2 .ui-datepicker-group{width:50%}.ui-datepicker-multi-3 .ui-datepicker-group{width:33.3%}.ui-datepicker-multi-4 .ui-datepicker-group{width:25%}.ui-datepicker-multi .ui-datepicker-group-last .ui-datepicker-header,.ui-datepicker-multi .ui-datepicker-group-middle .ui-datepicker-header{border-left-width:0}.ui-datepicker-multi .ui-datepicker-buttonpane{clear:left}.ui-datepicker-row-break{clear:both;width:100%;font-size:0}.ui-datepicker-rtl{direction:rtl}.ui-datepicker-rtl .ui-datepicker-prev{right:2px;left:auto}.ui-datepicker-rtl .ui-datepicker-next{left:2px;right:auto}.ui-datepicker-rtl .ui-datepicker-prev:hover{right:1px;left:auto}.ui-datepicker-rtl .ui-datepicker-next:hover{left:1px;right:auto}.ui-datepicker-rtl .ui-datepicker-buttonpane{clear:right}.ui-datepicker-rtl .ui-datepicker-buttonpane button{float:left}.ui-datepicker-rtl .ui-datepicker-buttonpane button.ui-datepicker-current,.ui-datepicker-rtl .ui-datepicker-group{float:right}.ui-datepicker-rtl .ui-datepicker-group-last .ui-datepicker-header,.ui-datepicker-rtl .ui-datepicker-group-middle 
.ui-datepicker-header{border-right-width:0;border-left-width:1px}.ui-dialog{overflow:hidden;position:absolute;top:0;left:0;padding:.2em;outline:0}.ui-dialog .ui-dialog-titlebar{padding:.4em 1em;position:relative}.ui-dialog .ui-dialog-title{float:left;margin:.1em 0;white-space:nowrap;width:90%;overflow:hidden;text-overflow:ellipsis}.ui-dialog .ui-dialog-titlebar-close{position:absolute;right:.3em;top:50%;width:20px;margin:-10px 0 0 0;padding:1px;height:20px}.ui-dialog .ui-dialog-content{position:relative;border:0;padding:.5em 1em;background:none;overflow:auto}.ui-dialog .ui-dialog-buttonpane{text-align:left;border-width:1px 0 0 0;background-image:none;margin-top:.5em;padding:.3em 1em .5em .4em}.ui-dialog .ui-dialog-buttonpane .ui-dialog-buttonset{float:right}.ui-dialog .ui-dialog-buttonpane button{margin:.5em .4em .5em 0;cursor:pointer}.ui-dialog .ui-resizable-se{width:12px;height:12px;right:-5px;bottom:-5px;background-position:16px 16px}.ui-draggable .ui-dialog-titlebar{cursor:move}.ui-draggable-handle{-ms-touch-action:none;touch-action:none}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:none}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{position:relative;margin:0;padding:3px 1em 3px .4em;cursor:pointer;min-height:0;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0}.ui-progressbar{height:2em;text-align:left;overflow:hidden}.ui-progressbar .ui-progressbar-value{margin:-1px;height:100%}.ui-progressbar 
.ui-progressbar-overlay{background:url("data:image/gif;base64,R0lGODlhKAAoAIABAAAAAP///yH/C05FVFNDQVBFMi4wAwEAAAAh+QQJAQABACwAAAAAKAAoAAACkYwNqXrdC52DS06a7MFZI+4FHBCKoDeWKXqymPqGqxvJrXZbMx7Ttc+w9XgU2FB3lOyQRWET2IFGiU9m1frDVpxZZc6bfHwv4c1YXP6k1Vdy292Fb6UkuvFtXpvWSzA+HycXJHUXiGYIiMg2R6W459gnWGfHNdjIqDWVqemH2ekpObkpOlppWUqZiqr6edqqWQAAIfkECQEAAQAsAAAAACgAKAAAApSMgZnGfaqcg1E2uuzDmmHUBR8Qil95hiPKqWn3aqtLsS18y7G1SzNeowWBENtQd+T1JktP05nzPTdJZlR6vUxNWWjV+vUWhWNkWFwxl9VpZRedYcflIOLafaa28XdsH/ynlcc1uPVDZxQIR0K25+cICCmoqCe5mGhZOfeYSUh5yJcJyrkZWWpaR8doJ2o4NYq62lAAACH5BAkBAAEALAAAAAAoACgAAAKVDI4Yy22ZnINRNqosw0Bv7i1gyHUkFj7oSaWlu3ovC8GxNso5fluz3qLVhBVeT/Lz7ZTHyxL5dDalQWPVOsQWtRnuwXaFTj9jVVh8pma9JjZ4zYSj5ZOyma7uuolffh+IR5aW97cHuBUXKGKXlKjn+DiHWMcYJah4N0lYCMlJOXipGRr5qdgoSTrqWSq6WFl2ypoaUAAAIfkECQEAAQAsAAAAACgAKAAAApaEb6HLgd/iO7FNWtcFWe+ufODGjRfoiJ2akShbueb0wtI50zm02pbvwfWEMWBQ1zKGlLIhskiEPm9R6vRXxV4ZzWT2yHOGpWMyorblKlNp8HmHEb/lCXjcW7bmtXP8Xt229OVWR1fod2eWqNfHuMjXCPkIGNileOiImVmCOEmoSfn3yXlJWmoHGhqp6ilYuWYpmTqKUgAAIfkECQEAAQAsAAAAACgAKAAAApiEH6kb58biQ3FNWtMFWW3eNVcojuFGfqnZqSebuS06w5V80/X02pKe8zFwP6EFWOT1lDFk8rGERh1TTNOocQ61Hm4Xm2VexUHpzjymViHrFbiELsefVrn6XKfnt2Q9G/+Xdie499XHd2g4h7ioOGhXGJboGAnXSBnoBwKYyfioubZJ2Hn0RuRZaflZOil56Zp6iioKSXpUAAAh+QQJAQABACwAAAAAKAAoAAACkoQRqRvnxuI7kU1a1UU5bd5tnSeOZXhmn5lWK3qNTWvRdQxP8qvaC+/yaYQzXO7BMvaUEmJRd3TsiMAgswmNYrSgZdYrTX6tSHGZO73ezuAw2uxuQ+BbeZfMxsexY35+/Qe4J1inV0g4x3WHuMhIl2jXOKT2Q+VU5fgoSUI52VfZyfkJGkha6jmY+aaYdirq+lQAACH5BAkBAAEALAAAAAAoACgAAAKWBIKpYe0L3YNKToqswUlvznigd4wiR4KhZrKt9Upqip61i9E3vMvxRdHlbEFiEXfk9YARYxOZZD6VQ2pUunBmtRXo1Lf8hMVVcNl8JafV38aM2/Fu5V16Bn63r6xt97j09+MXSFi4BniGFae3hzbH9+hYBzkpuUh5aZmHuanZOZgIuvbGiNeomCnaxxap2upaCZsq+1kAACH5BAkBAAEALAAAAAAoACgAAAKXjI8By5zf4kOxTVrXNVlv1X0d8IGZGKLnNpYtm8Lr9cqVeuOSvfOW79D9aDHizNhDJidFZhNydEahOaDH6nomtJjp1tutKoNWkvA6JqfRVLHU/QUfau9l2x7G54d1fl995xcIGAdXqMfBNadoYrhH+Mg2KBlpVpbluCiXmMnZ2Sh4GBqJ+ckIOqqJ6LmKSllZmsoq6wpQAAAh+QQJAQABACwAAAAAKAAoAAAClYx/oLvoxuJDkU1a1YUZbJ59nSd2ZXhWqbRa2/gF8Gu2DY3iqs
7yrq+xBYEkYvFSM8aSSObE+ZgRl1BHFZNr7pRCavZ5BW2142hY3AN/zWtsmf12p9XxxFl2lpLn1rseztfXZjdIWIf2s5dItwjYKBgo9yg5pHgzJXTEeGlZuenpyPmpGQoKOWkYmSpaSnqKileI2FAAACH5BAkBAAEALAAAAAAoACgAAAKVjB+gu+jG4kORTVrVhRlsnn2dJ3ZleFaptFrb+CXmO9OozeL5VfP99HvAWhpiUdcwkpBH3825AwYdU8xTqlLGhtCosArKMpvfa1mMRae9VvWZfeB2XfPkeLmm18lUcBj+p5dnN8jXZ3YIGEhYuOUn45aoCDkp16hl5IjYJvjWKcnoGQpqyPlpOhr3aElaqrq56Bq7VAAAOw==");height:100%;filter:alpha(opacity=25);opacity:0.25}.ui-progressbar-indeterminate .ui-progressbar-value{background-image:none}.ui-resizable{position:relative}.ui-resizable-handle{position:absolute;font-size:0.1px;display:block;-ms-touch-action:none;touch-action:none}.ui-resizable-disabled .ui-resizable-handle,.ui-resizable-autohide .ui-resizable-handle{display:none}.ui-resizable-n{cursor:n-resize;height:7px;width:100%;top:-5px;left:0}.ui-resizable-s{cursor:s-resize;height:7px;width:100%;bottom:-5px;left:0}.ui-resizable-e{cursor:e-resize;width:7px;right:-5px;top:0;height:100%}.ui-resizable-w{cursor:w-resize;width:7px;left:-5px;top:0;height:100%}.ui-resizable-se{cursor:se-resize;width:12px;height:12px;right:1px;bottom:1px}.ui-resizable-sw{cursor:sw-resize;width:9px;height:9px;left:-5px;bottom:-5px}.ui-resizable-nw{cursor:nw-resize;width:9px;height:9px;left:-5px;top:-5px}.ui-resizable-ne{cursor:ne-resize;width:9px;height:9px;right:-5px;top:-5px}.ui-selectable{-ms-touch-action:none;touch-action:none}.ui-selectable-helper{position:absolute;z-index:100;border:1px dotted black}.ui-selectmenu-menu{padding:0;margin:0;position:absolute;top:0;left:0;display:none}.ui-selectmenu-menu .ui-menu{overflow:auto;overflow-x:hidden;padding-bottom:1px}.ui-selectmenu-menu .ui-menu .ui-selectmenu-optgroup{font-size:1em;font-weight:bold;line-height:1.5;padding:2px 0.4em;margin:0.5em 0 0 0;height:auto;border:0}.ui-selectmenu-open{display:block}.ui-selectmenu-button{display:inline-block;overflow:hidden;position:relative;text-decoration:none;cursor:pointer}.ui-selectmenu-button 
span.ui-icon{right:0.5em;left:auto;margin-top:-8px;position:absolute;top:50%}.ui-selectmenu-button span.ui-selectmenu-text{text-align:left;padding:0.4em 2.1em 0.4em 1em;display:block;line-height:1.4;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.ui-slider{position:relative;text-align:left}.ui-slider .ui-slider-handle{position:absolute;z-index:2;width:1.2em;height:1.2em;cursor:default;-ms-touch-action:none;touch-action:none}.ui-slider .ui-slider-range{position:absolute;z-index:1;font-size:.7em;display:block;border:0;background-position:0 0}.ui-slider.ui-state-disabled .ui-slider-handle,.ui-slider.ui-state-disabled .ui-slider-range{filter:inherit}.ui-slider-horizontal{height:.8em}.ui-slider-horizontal .ui-slider-handle{top:-.3em;margin-left:-.6em}.ui-slider-horizontal .ui-slider-range{top:0;height:100%}.ui-slider-horizontal .ui-slider-range-min{left:0}.ui-slider-horizontal .ui-slider-range-max{right:0}.ui-slider-vertical{width:.8em;height:100px}.ui-slider-vertical .ui-slider-handle{left:-.3em;margin-left:0;margin-bottom:-.6em}.ui-slider-vertical .ui-slider-range{left:0;width:100%}.ui-slider-vertical .ui-slider-range-min{bottom:0}.ui-slider-vertical .ui-slider-range-max{top:0}.ui-sortable-handle{-ms-touch-action:none;touch-action:none}.ui-spinner{position:relative;display:inline-block;overflow:hidden;padding:0;vertical-align:middle}.ui-spinner-input{border:none;background:none;color:inherit;padding:0;margin:.2em 0;vertical-align:middle;margin-left:.4em;margin-right:22px}.ui-spinner-button{width:16px;height:50%;font-size:.5em;padding:0;margin:0;text-align:center;position:absolute;cursor:default;display:block;overflow:hidden;right:0}.ui-spinner a.ui-spinner-button{border-top:none;border-bottom:none;border-right:none}.ui-spinner .ui-icon{position:absolute;margin-top:-8px;top:50%;left:0}.ui-spinner-up{top:0}.ui-spinner-down{bottom:0}.ui-spinner .ui-icon-triangle-1-s{background-position:-65px -16px}.ui-tabs{position:relative;padding:.2em}.ui-tabs 
.ui-tabs-nav{margin:0;padding:.2em .2em 0}.ui-tabs .ui-tabs-nav li{list-style:none;float:left;position:relative;top:0;margin:1px .2em 0 0;border-bottom-width:0;padding:0;white-space:nowrap}.ui-tabs .ui-tabs-nav .ui-tabs-anchor{float:left;padding:.5em 1em;text-decoration:none}.ui-tabs .ui-tabs-nav li.ui-tabs-active{margin-bottom:-1px;padding-bottom:1px}.ui-tabs .ui-tabs-nav li.ui-tabs-active .ui-tabs-anchor,.ui-tabs .ui-tabs-nav li.ui-state-disabled .ui-tabs-anchor,.ui-tabs .ui-tabs-nav li.ui-tabs-loading .ui-tabs-anchor{cursor:text}.ui-tabs-collapsible .ui-tabs-nav li.ui-tabs-active .ui-tabs-anchor{cursor:pointer}.ui-tabs .ui-tabs-panel{display:block;border-width:0;padding:1em 1.4em;background:none}.ui-tooltip{padding:8px;position:absolute;z-index:9999;max-width:300px;-webkit-box-shadow:0 0 5px #aaa;box-shadow:0 0 5px #aaa}body .ui-tooltip{border-width:2px}.ui-widget{font-family:Trebuchet MS,Tahoma,Verdana,Arial,sans-serif;font-size:1.1em}.ui-widget .ui-widget{font-size:1em}.ui-widget input,.ui-widget select,.ui-widget textarea,.ui-widget button{font-family:Trebuchet MS,Tahoma,Verdana,Arial,sans-serif;font-size:1em}.ui-widget-content{border:1px solid #ddd;background:#eee url("images/ui-bg_highlight-soft_100_eeeeee_1x100.png") 50% top repeat-x;color:#333}.ui-widget-content a{color:#333}.ui-widget-header{border:1px solid #e78f08;background:#f6a828 url("images/ui-bg_gloss-wave_35_f6a828_500x100.png") 50% 50% repeat-x;color:#fff;font-weight:bold}.ui-widget-header a{color:#fff}.ui-state-default,.ui-widget-content .ui-state-default,.ui-widget-header .ui-state-default{border:1px solid #ccc;background:#f6f6f6 url("images/ui-bg_glass_100_f6f6f6_1x400.png") 50% 50% repeat-x;font-weight:bold;color:#1c94c4}.ui-state-default a,.ui-state-default a:link,.ui-state-default a:visited{color:#1c94c4;text-decoration:none}.ui-state-hover,.ui-widget-content .ui-state-hover,.ui-widget-header .ui-state-hover,.ui-state-focus,.ui-widget-content .ui-state-focus,.ui-widget-header 
.ui-state-focus{border:1px solid #fbcb09;background:#fdf5ce url("images/ui-bg_glass_100_fdf5ce_1x400.png") 50% 50% repeat-x;font-weight:bold;color:#c77405}.ui-state-hover a,.ui-state-hover a:hover,.ui-state-hover a:link,.ui-state-hover a:visited,.ui-state-focus a,.ui-state-focus a:hover,.ui-state-focus a:link,.ui-state-focus a:visited{color:#c77405;text-decoration:none}.ui-state-active,.ui-widget-content .ui-state-active,.ui-widget-header .ui-state-active{border:1px solid #fbd850;background:#fff url("images/ui-bg_glass_65_ffffff_1x400.png") 50% 50% repeat-x;font-weight:bold;color:#eb8f00}.ui-state-active a,.ui-state-active a:link,.ui-state-active a:visited{color:#eb8f00;text-decoration:none}.ui-state-highlight,.ui-widget-content .ui-state-highlight,.ui-widget-header .ui-state-highlight{border:1px solid #fed22f;background:#ffe45c url("images/ui-bg_highlight-soft_75_ffe45c_1x100.png") 50% top repeat-x;color:#363636}.ui-state-highlight a,.ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a{color:#363636}.ui-state-error,.ui-widget-content .ui-state-error,.ui-widget-header .ui-state-error{border:1px solid #cd0a0a;background:#b81900 url("images/ui-bg_diagonals-thick_18_b81900_40x40.png") 50% 50% repeat;color:#fff}.ui-state-error a,.ui-widget-content .ui-state-error a,.ui-widget-header .ui-state-error a{color:#fff}.ui-state-error-text,.ui-widget-content .ui-state-error-text,.ui-widget-header .ui-state-error-text{color:#fff}.ui-priority-primary,.ui-widget-content .ui-priority-primary,.ui-widget-header .ui-priority-primary{font-weight:bold}.ui-priority-secondary,.ui-widget-content .ui-priority-secondary,.ui-widget-header .ui-priority-secondary{opacity:.7;filter:Alpha(Opacity=70);font-weight:normal}.ui-state-disabled,.ui-widget-content .ui-state-disabled,.ui-widget-header .ui-state-disabled{opacity:.35;filter:Alpha(Opacity=35);background-image:none}.ui-state-disabled 
.ui-icon{filter:Alpha(Opacity=35)}.ui-icon{width:16px;height:16px}.ui-icon,.ui-widget-content .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-widget-header .ui-icon{background-image:url("images/ui-icons_ffffff_256x240.png")}.ui-state-default .ui-icon{background-image:url("images/ui-icons_ef8c08_256x240.png")}.ui-state-hover .ui-icon,.ui-state-focus .ui-icon{background-image:url("images/ui-icons_ef8c08_256x240.png")}.ui-state-active .ui-icon{background-image:url("images/ui-icons_ef8c08_256x240.png")}.ui-state-highlight .ui-icon{background-image:url("images/ui-icons_228ef1_256x240.png")}.ui-state-error .ui-icon,.ui-state-error-text .ui-icon{background-image:url("images/ui-icons_ffd27a_256x240.png")}.ui-icon-blank{background-position:16px 16px}.ui-icon-carat-1-n{background-position:0 0}.ui-icon-carat-1-ne{background-position:-16px 0}.ui-icon-carat-1-e{background-position:-32px 0}.ui-icon-carat-1-se{background-position:-48px 0}.ui-icon-carat-1-s{background-position:-64px 0}.ui-icon-carat-1-sw{background-position:-80px 0}.ui-icon-carat-1-w{background-position:-96px 0}.ui-icon-carat-1-nw{background-position:-112px 0}.ui-icon-carat-2-n-s{background-position:-128px 0}.ui-icon-carat-2-e-w{background-position:-144px 0}.ui-icon-triangle-1-n{background-position:0 -16px}.ui-icon-triangle-1-ne{background-position:-16px -16px}.ui-icon-triangle-1-e{background-position:-32px -16px}.ui-icon-triangle-1-se{background-position:-48px -16px}.ui-icon-triangle-1-s{background-position:-64px -16px}.ui-icon-triangle-1-sw{background-position:-80px -16px}.ui-icon-triangle-1-w{background-position:-96px -16px}.ui-icon-triangle-1-nw{background-position:-112px -16px}.ui-icon-triangle-2-n-s{background-position:-128px -16px}.ui-icon-triangle-2-e-w{background-position:-144px -16px}.ui-icon-arrow-1-n{background-position:0 -32px}.ui-icon-arrow-1-ne{background-position:-16px -32px}.ui-icon-arrow-1-e{background-position:-32px -32px}.ui-icon-arrow-1-se{background-position:-48px 
-32px}.ui-icon-arrow-1-s{background-position:-64px -32px}.ui-icon-arrow-1-sw{background-position:-80px -32px}.ui-icon-arrow-1-w{background-position:-96px -32px}.ui-icon-arrow-1-nw{background-position:-112px -32px}.ui-icon-arrow-2-n-s{background-position:-128px -32px}.ui-icon-arrow-2-ne-sw{background-position:-144px -32px}.ui-icon-arrow-2-e-w{background-position:-160px -32px}.ui-icon-arrow-2-se-nw{background-position:-176px -32px}.ui-icon-arrowstop-1-n{background-position:-192px -32px}.ui-icon-arrowstop-1-e{background-position:-208px -32px}.ui-icon-arrowstop-1-s{background-position:-224px -32px}.ui-icon-arrowstop-1-w{background-position:-240px -32px}.ui-icon-arrowthick-1-n{background-position:0 -48px}.ui-icon-arrowthick-1-ne{background-position:-16px -48px}.ui-icon-arrowthick-1-e{background-position:-32px -48px}.ui-icon-arrowthick-1-se{background-position:-48px -48px}.ui-icon-arrowthick-1-s{background-position:-64px -48px}.ui-icon-arrowthick-1-sw{background-position:-80px -48px}.ui-icon-arrowthick-1-w{background-position:-96px -48px}.ui-icon-arrowthick-1-nw{background-position:-112px -48px}.ui-icon-arrowthick-2-n-s{background-position:-128px -48px}.ui-icon-arrowthick-2-ne-sw{background-position:-144px -48px}.ui-icon-arrowthick-2-e-w{background-position:-160px -48px}.ui-icon-arrowthick-2-se-nw{background-position:-176px -48px}.ui-icon-arrowthickstop-1-n{background-position:-192px -48px}.ui-icon-arrowthickstop-1-e{background-position:-208px -48px}.ui-icon-arrowthickstop-1-s{background-position:-224px -48px}.ui-icon-arrowthickstop-1-w{background-position:-240px -48px}.ui-icon-arrowreturnthick-1-w{background-position:0 -64px}.ui-icon-arrowreturnthick-1-n{background-position:-16px -64px}.ui-icon-arrowreturnthick-1-e{background-position:-32px -64px}.ui-icon-arrowreturnthick-1-s{background-position:-48px -64px}.ui-icon-arrowreturn-1-w{background-position:-64px -64px}.ui-icon-arrowreturn-1-n{background-position:-80px -64px}.ui-icon-arrowreturn-1-e{background-position:-96px 
-64px}.ui-icon-arrowreturn-1-s{background-position:-112px -64px}.ui-icon-arrowrefresh-1-w{background-position:-128px -64px}.ui-icon-arrowrefresh-1-n{background-position:-144px -64px}.ui-icon-arrowrefresh-1-e{background-position:-160px -64px}.ui-icon-arrowrefresh-1-s{background-position:-176px -64px}.ui-icon-arrow-4{background-position:0 -80px}.ui-icon-arrow-4-diag{background-position:-16px -80px}.ui-icon-extlink{background-position:-32px -80px}.ui-icon-newwin{background-position:-48px -80px}.ui-icon-refresh{background-position:-64px -80px}.ui-icon-shuffle{background-position:-80px -80px}.ui-icon-transfer-e-w{background-position:-96px -80px}.ui-icon-transferthick-e-w{background-position:-112px -80px}.ui-icon-folder-collapsed{background-position:0 -96px}.ui-icon-folder-open{background-position:-16px -96px}.ui-icon-document{background-position:-32px -96px}.ui-icon-document-b{background-position:-48px -96px}.ui-icon-note{background-position:-64px -96px}.ui-icon-mail-closed{background-position:-80px -96px}.ui-icon-mail-open{background-position:-96px -96px}.ui-icon-suitcase{background-position:-112px -96px}.ui-icon-comment{background-position:-128px -96px}.ui-icon-person{background-position:-144px -96px}.ui-icon-print{background-position:-160px -96px}.ui-icon-trash{background-position:-176px -96px}.ui-icon-locked{background-position:-192px -96px}.ui-icon-unlocked{background-position:-208px -96px}.ui-icon-bookmark{background-position:-224px -96px}.ui-icon-tag{background-position:-240px -96px}.ui-icon-home{background-position:0 -112px}.ui-icon-flag{background-position:-16px -112px}.ui-icon-calendar{background-position:-32px -112px}.ui-icon-cart{background-position:-48px -112px}.ui-icon-pencil{background-position:-64px -112px}.ui-icon-clock{background-position:-80px -112px}.ui-icon-disk{background-position:-96px -112px}.ui-icon-calculator{background-position:-112px -112px}.ui-icon-zoomin{background-position:-128px -112px}.ui-icon-zoomout{background-position:-144px 
-112px}.ui-icon-search{background-position:-160px -112px}.ui-icon-wrench{background-position:-176px -112px}.ui-icon-gear{background-position:-192px -112px}.ui-icon-heart{background-position:-208px -112px}.ui-icon-star{background-position:-224px -112px}.ui-icon-link{background-position:-240px -112px}.ui-icon-cancel{background-position:0 -128px}.ui-icon-plus{background-position:-16px -128px}.ui-icon-plusthick{background-position:-32px -128px}.ui-icon-minus{background-position:-48px -128px}.ui-icon-minusthick{background-position:-64px -128px}.ui-icon-close{background-position:-80px -128px}.ui-icon-closethick{background-position:-96px -128px}.ui-icon-key{background-position:-112px -128px}.ui-icon-lightbulb{background-position:-128px -128px}.ui-icon-scissors{background-position:-144px -128px}.ui-icon-clipboard{background-position:-160px -128px}.ui-icon-copy{background-position:-176px -128px}.ui-icon-contact{background-position:-192px -128px}.ui-icon-image{background-position:-208px -128px}.ui-icon-video{background-position:-224px -128px}.ui-icon-script{background-position:-240px -128px}.ui-icon-alert{background-position:0 -144px}.ui-icon-info{background-position:-16px -144px}.ui-icon-notice{background-position:-32px -144px}.ui-icon-help{background-position:-48px -144px}.ui-icon-check{background-position:-64px -144px}.ui-icon-bullet{background-position:-80px -144px}.ui-icon-radio-on{background-position:-96px -144px}.ui-icon-radio-off{background-position:-112px -144px}.ui-icon-pin-w{background-position:-128px -144px}.ui-icon-pin-s{background-position:-144px -144px}.ui-icon-play{background-position:0 -160px}.ui-icon-pause{background-position:-16px -160px}.ui-icon-seek-next{background-position:-32px -160px}.ui-icon-seek-prev{background-position:-48px -160px}.ui-icon-seek-end{background-position:-64px -160px}.ui-icon-seek-start{background-position:-80px -160px}.ui-icon-seek-first{background-position:-80px -160px}.ui-icon-stop{background-position:-96px 
-160px}.ui-icon-eject{background-position:-112px -160px}.ui-icon-volume-off{background-position:-128px -160px}.ui-icon-volume-on{background-position:-144px -160px}.ui-icon-power{background-position:0 -176px}.ui-icon-signal-diag{background-position:-16px -176px}.ui-icon-signal{background-position:-32px -176px}.ui-icon-battery-0{background-position:-48px -176px}.ui-icon-battery-1{background-position:-64px -176px}.ui-icon-battery-2{background-position:-80px -176px}.ui-icon-battery-3{background-position:-96px -176px}.ui-icon-circle-plus{background-position:0 -192px}.ui-icon-circle-minus{background-position:-16px -192px}.ui-icon-circle-close{background-position:-32px -192px}.ui-icon-circle-triangle-e{background-position:-48px -192px}.ui-icon-circle-triangle-s{background-position:-64px -192px}.ui-icon-circle-triangle-w{background-position:-80px -192px}.ui-icon-circle-triangle-n{background-position:-96px -192px}.ui-icon-circle-arrow-e{background-position:-112px -192px}.ui-icon-circle-arrow-s{background-position:-128px -192px}.ui-icon-circle-arrow-w{background-position:-144px -192px}.ui-icon-circle-arrow-n{background-position:-160px -192px}.ui-icon-circle-zoomin{background-position:-176px -192px}.ui-icon-circle-zoomout{background-position:-192px -192px}.ui-icon-circle-check{background-position:-208px -192px}.ui-icon-circlesmall-plus{background-position:0 -208px}.ui-icon-circlesmall-minus{background-position:-16px -208px}.ui-icon-circlesmall-close{background-position:-32px -208px}.ui-icon-squaresmall-plus{background-position:-48px -208px}.ui-icon-squaresmall-minus{background-position:-64px -208px}.ui-icon-squaresmall-close{background-position:-80px -208px}.ui-icon-grip-dotted-vertical{background-position:0 -224px}.ui-icon-grip-dotted-horizontal{background-position:-16px -224px}.ui-icon-grip-solid-vertical{background-position:-32px -224px}.ui-icon-grip-solid-horizontal{background-position:-48px -224px}.ui-icon-gripsmall-diagonal-se{background-position:-64px 
-224px}.ui-icon-grip-diagonal-se{background-position:-80px -224px}.ui-corner-all,.ui-corner-top,.ui-corner-left,.ui-corner-tl{border-top-left-radius:4px}.ui-corner-all,.ui-corner-top,.ui-corner-right,.ui-corner-tr{border-top-right-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-left,.ui-corner-bl{border-bottom-left-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-right,.ui-corner-br{border-bottom-right-radius:4px}.ui-widget-overlay{background:#666 url("images/ui-bg_diagonals-thick_20_666666_40x40.png") 50% 50% repeat;opacity:.5;filter:Alpha(Opacity=50)}.ui-widget-shadow{margin:-5px 0 0 -5px;padding:5px;background:#000 url("images/ui-bg_flat_10_000000_40x100.png") 50% 50% repeat-x;opacity:.2;filter:Alpha(Opacity=20);border-radius:5px} \ No newline at end of file diff --git a/static/fonts/FontAwesome.otf b/static/fonts/FontAwesome.otf deleted file mode 100644 index f7936cc1e..000000000 Binary files a/static/fonts/FontAwesome.otf and /dev/null differ diff --git a/static/fonts/fontawesome-webfont.eot b/static/fonts/fontawesome-webfont.eot deleted file mode 100644 index 33b2bb800..000000000 Binary files a/static/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/static/fonts/fontawesome-webfont.svg b/static/fonts/fontawesome-webfont.svg deleted file mode 100644 index 1ee89d436..000000000 --- a/static/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,565 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/static/fonts/fontawesome-webfont.ttf b/static/fonts/fontawesome-webfont.ttf deleted file mode 100644 index ed9372f8e..000000000 Binary files a/static/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/static/fonts/fontawesome-webfont.woff b/static/fonts/fontawesome-webfont.woff deleted file mode 100644 index 8b280b98f..000000000 Binary files a/static/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/static/fonts/fontawesome-webfont.woff2 b/static/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 3311d5851..000000000 Binary files a/static/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/static/fonts/fontello.eot b/static/fonts/fontello.eot deleted file mode 100644 index f63ffa043..000000000 Binary files a/static/fonts/fontello.eot and /dev/null differ diff --git a/static/fonts/fontello.svg b/static/fonts/fontello.svg deleted file mode 100644 index 2db13984a..000000000 --- a/static/fonts/fontello.svg +++ /dev/null @@ -1,33 +0,0 @@ - - - -Copyright (C) 2013 by original authors @ fontello.com - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/static/fonts/fontello.ttf b/static/fonts/fontello.ttf deleted file mode 100644 index 95715f866..000000000 Binary files a/static/fonts/fontello.ttf and /dev/null differ diff --git a/static/fonts/fontello.woff b/static/fonts/fontello.woff deleted file mode 100644 
index 084f0c55c..000000000 Binary files a/static/fonts/fontello.woff and /dev/null differ diff --git a/static/fonts/forkawesome-webfont.eot b/static/fonts/forkawesome-webfont.eot new file mode 100644 index 000000000..c2c24b416 Binary files /dev/null and b/static/fonts/forkawesome-webfont.eot differ diff --git a/static/fonts/forkawesome-webfont.svg b/static/fonts/forkawesome-webfont.svg new file mode 100644 index 000000000..bd45b3049 --- /dev/null +++ b/static/fonts/forkawesome-webfont.svg @@ -0,0 +1,3232 @@ + + + + + +Created by FontForge 20190801 at Fri Aug 27 00:07:49 2021 + By shine +The Fork Awesome font is licensed under the SIL OFL 1.1 (http://scripts.sil.org/OFL). Fork Awesome is a fork based of off Font Awesome 4.7.0 by Dave Gandy. More info on licenses at https://forkawesome.github.io + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/fonts/forkawesome-webfont.ttf b/static/fonts/forkawesome-webfont.ttf new file mode 100644 index 000000000..1f1d8f34c Binary files /dev/null and b/static/fonts/forkawesome-webfont.ttf differ diff --git a/static/fonts/forkawesome-webfont.woff b/static/fonts/forkawesome-webfont.woff new file mode 100644 index 000000000..cca43af44 Binary files /dev/null and b/static/fonts/forkawesome-webfont.woff differ diff --git a/static/fonts/forkawesome-webfont.woff2 b/static/fonts/forkawesome-webfont.woff2 new file mode 100644 index 000000000..c96e5bf14 Binary files /dev/null and b/static/fonts/forkawesome-webfont.woff2 differ diff --git a/static/images/generic_cover_full.png b/static/images/generic_cover_full.png new file mode 100644 index 000000000..792c77a52 Binary files /dev/null and b/static/images/generic_cover_full.png differ diff --git a/static/images/generic_cover_thumb.png b/static/images/generic_cover_thumb.png new file mode 100644 index 000000000..594bf4cf3 Binary files /dev/null and b/static/images/generic_cover_thumb.png differ diff --git a/static/images/icons/mastodon-hover.png b/static/images/icons/mastodon-hover.png new file mode 100644 index 000000000..34943301e Binary files /dev/null and b/static/images/icons/mastodon-hover.png differ diff --git a/static/images/icons/mastodon.png b/static/images/icons/mastodon.png new file mode 100644 index 000000000..1086c3431 Binary files /dev/null and b/static/images/icons/mastodon.png differ diff --git a/static/images/icons/twitter-hover.png b/static/images/icons/twitter-hover.png deleted file mode 
100644 index f15a59d32..000000000 Binary files a/static/images/icons/twitter-hover.png and /dev/null differ diff --git a/static/images/icons/twitter.png b/static/images/icons/twitter.png deleted file mode 100755 index 9f028cdd0..000000000 Binary files a/static/images/icons/twitter.png and /dev/null differ diff --git a/static/images/reader/loader.gif b/static/images/reader/loader.gif deleted file mode 100644 index 68005bcbe..000000000 Binary files a/static/images/reader/loader.gif and /dev/null differ diff --git a/static/js/greenpanel.js b/static/js/greenpanel.js new file mode 100644 index 000000000..c7a129c1b --- /dev/null +++ b/static/js/greenpanel.js @@ -0,0 +1,14 @@ +var $j = jQuery.noConflict(); + +$j().ready(function(){ + var contentblock = $j('#content-block, .user-block-hide'); + contentblock.on('mouseenter', '.panelview', function() { + $j(this).children('.panelfront').removeClass('side1').addClass('side2'); + $j(this).children('.panelback').removeClass('side2').addClass('side1'); + }); + + contentblock.on('mouseleave', '.panelview', function() { + $j(this).children('.panelback').removeClass('side1').addClass('side2'); + $j(this).children('.panelfront').removeClass('side2').addClass('side1'); + }); +}); diff --git a/static/js/import_books.js b/static/js/import_books.js index a45f0ac26..0ba56e44c 100644 --- a/static/js/import_books.js +++ b/static/js/import_books.js @@ -18,37 +18,8 @@ jQuery(document).ready(function($) { return false; }); - $('#load_shelf_form').submit(function(){ - // do ajax call to pick up the list of shelves - - if ($('#id_goodreads_shelf_name_number').length == 0) { - - var params = {}; - $.getJSON('/goodreads/shelves', params, function(json) { - // say waiting - $('#goodreads_input').attr('value', 'Loading....'); - var sel = $('').appendTo('#goodreads_shelves'); - if (json.total_book_count === 1) { - $('').appendTo(sel); - } else { - $('').appendTo(sel); - } - for (var i = 0; i < json.user_shelves.length; i++) { - if 
(json.user_shelves[i].book_count === 1) { - $('').appendTo(sel); - } else { - $('').appendTo(sel); - } - } - $('#load_gr_shelves_list').attr('id','load_shelf_form'); - $('#goodreads_input').attr('value', 'Add this shelf'); - }); - - } else { - post_and_alert('/goodreads/load_shelf/')(false,'#load_shelf_form',$('#load_shelf_form').serialize()); + } return false; diff --git a/static/js/jquery-ui-1.11.4.min.js b/static/js/jquery-ui-1.11.4.min.js new file mode 100644 index 000000000..5824d1292 --- /dev/null +++ b/static/js/jquery-ui-1.11.4.min.js @@ -0,0 +1,13 @@ +/*! jQuery UI - v1.11.4 - 2015-03-11 +* http://jqueryui.com +* Includes: core.js, widget.js, mouse.js, position.js, accordion.js, autocomplete.js, button.js, datepicker.js, dialog.js, draggable.js, droppable.js, effect.js, effect-blind.js, effect-bounce.js, effect-clip.js, effect-drop.js, effect-explode.js, effect-fade.js, effect-fold.js, effect-highlight.js, effect-puff.js, effect-pulsate.js, effect-scale.js, effect-shake.js, effect-size.js, effect-slide.js, effect-transfer.js, menu.js, progressbar.js, resizable.js, selectable.js, selectmenu.js, slider.js, sortable.js, spinner.js, tabs.js, tooltip.js +* Copyright 2015 jQuery Foundation and other contributors; Licensed MIT */ + +(function(e){"function"==typeof define&&define.amd?define(["jquery"],e):e(jQuery)})(function(e){function t(t,s){var n,a,o,r=t.nodeName.toLowerCase();return"area"===r?(n=t.parentNode,a=n.name,t.href&&a&&"map"===n.nodeName.toLowerCase()?(o=e("img[usemap='#"+a+"']")[0],!!o&&i(o)):!1):(/^(input|select|textarea|button|object)$/.test(r)?!t.disabled:"a"===r?t.href||s:s)&&i(t)}function i(t){return e.expr.filters.visible(t)&&!e(t).parents().addBack().filter(function(){return"hidden"===e.css(this,"visibility")}).length}function s(e){for(var t,i;e.length&&e[0]!==document;){if(t=e.css("position"),("absolute"===t||"relative"===t||"fixed"===t)&&(i=parseInt(e.css("zIndex"),10),!isNaN(i)&&0!==i))return i;e=e.parent()}return 0}function 
n(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},e.extend(this._defaults,this.regional[""]),this.regional.en=e.extend(!0,{},this.regional[""]),this.regional["en-US"]=e.extend(!0,{},this.regional.en),this.dpDiv=a(e("
    "))}function a(t){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return t.delegate(i,"mouseout",function(){e(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&e(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&e(this).removeClass("ui-datepicker-next-hover")}).delegate(i,"mouseover",o)}function o(){e.datepicker._isDisabledDatepicker(v.inline?v.dpDiv.parent()[0]:v.input[0])||(e(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),e(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&e(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&e(this).addClass("ui-datepicker-next-hover"))}function r(t,i){e.extend(t,i);for(var s in i)null==i[s]&&(t[s]=i[s]);return t}function h(e){return function(){var t=this.element.val();e.apply(this,arguments),this._refresh(),t!==this.element.val()&&this._trigger("change")}}e.ui=e.ui||{},e.extend(e.ui,{version:"1.11.4",keyCode:{BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38}}),e.fn.extend({scrollParent:function(t){var i=this.css("position"),s="absolute"===i,n=t?/(auto|scroll|hidden)/:/(auto|scroll)/,a=this.parents().filter(function(){var t=e(this);return s&&"static"===t.css("position")?!1:n.test(t.css("overflow")+t.css("overflow-y")+t.css("overflow-x"))}).eq(0);return"fixed"!==i&&a.length?a:e(this[0].ownerDocument||document)},uniqueId:function(){var e=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++e)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&e(this).removeAttr("id")})}}),e.extend(e.expr[":"],{data:e.expr.createPseudo?e.expr.createPseudo(function(t){return 
function(i){return!!e.data(i,t)}}):function(t,i,s){return!!e.data(t,s[3])},focusable:function(i){return t(i,!isNaN(e.attr(i,"tabindex")))},tabbable:function(i){var s=e.attr(i,"tabindex"),n=isNaN(s);return(n||s>=0)&&t(i,!n)}}),e("").outerWidth(1).jquery||e.each(["Width","Height"],function(t,i){function s(t,i,s,a){return e.each(n,function(){i-=parseFloat(e.css(t,"padding"+this))||0,s&&(i-=parseFloat(e.css(t,"border"+this+"Width"))||0),a&&(i-=parseFloat(e.css(t,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],a=i.toLowerCase(),o={innerWidth:e.fn.innerWidth,innerHeight:e.fn.innerHeight,outerWidth:e.fn.outerWidth,outerHeight:e.fn.outerHeight};e.fn["inner"+i]=function(t){return void 0===t?o["inner"+i].call(this):this.each(function(){e(this).css(a,s(this,t)+"px")})},e.fn["outer"+i]=function(t,n){return"number"!=typeof t?o["outer"+i].call(this,t):this.each(function(){e(this).css(a,s(this,t,!0,n)+"px")})}}),e.fn.addBack||(e.fn.addBack=function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}),e("").data("a-b","a").removeData("a-b").data("a-b")&&(e.fn.removeData=function(t){return function(i){return arguments.length?t.call(this,e.camelCase(i)):t.call(this)}}(e.fn.removeData)),e.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase()),e.fn.extend({focus:function(t){return function(i,s){return"number"==typeof i?this.each(function(){var t=this;setTimeout(function(){e(t).focus(),s&&s.call(t)},i)}):t.apply(this,arguments)}}(e.fn.focus),disableSelection:function(){var e="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.bind(e+".ui-disableSelection",function(e){e.preventDefault()})}}(),enableSelection:function(){return this.unbind(".ui-disableSelection")},zIndex:function(t){if(void 0!==t)return this.css("zIndex",t);if(this.length)for(var 
i,s,n=e(this[0]);n.length&&n[0]!==document;){if(i=n.css("position"),("absolute"===i||"relative"===i||"fixed"===i)&&(s=parseInt(n.css("zIndex"),10),!isNaN(s)&&0!==s))return s;n=n.parent()}return 0}}),e.ui.plugin={add:function(t,i,s){var n,a=e.ui[t].prototype;for(n in s)a.plugins[n]=a.plugins[n]||[],a.plugins[n].push([i,s[n]])},call:function(e,t,i,s){var n,a=e.plugins[t];if(a&&(s||e.element[0].parentNode&&11!==e.element[0].parentNode.nodeType))for(n=0;a.length>n;n++)e.options[a[n][0]]&&a[n][1].apply(e.element,i)}};var l=0,u=Array.prototype.slice;e.cleanData=function(t){return function(i){var s,n,a;for(a=0;null!=(n=i[a]);a++)try{s=e._data(n,"events"),s&&s.remove&&e(n).triggerHandler("remove")}catch(o){}t(i)}}(e.cleanData),e.widget=function(t,i,s){var n,a,o,r,h={},l=t.split(".")[0];return t=t.split(".")[1],n=l+"-"+t,s||(s=i,i=e.Widget),e.expr[":"][n.toLowerCase()]=function(t){return!!e.data(t,n)},e[l]=e[l]||{},a=e[l][t],o=e[l][t]=function(e,t){return this._createWidget?(arguments.length&&this._createWidget(e,t),void 0):new o(e,t)},e.extend(o,a,{version:s.version,_proto:e.extend({},s),_childConstructors:[]}),r=new i,r.options=e.widget.extend({},r.options),e.each(s,function(t,s){return e.isFunction(s)?(h[t]=function(){var e=function(){return i.prototype[t].apply(this,arguments)},n=function(e){return i.prototype[t].apply(this,e)};return function(){var t,i=this._super,a=this._superApply;return this._super=e,this._superApply=n,t=s.apply(this,arguments),this._super=i,this._superApply=a,t}}(),void 0):(h[t]=s,void 0)}),o.prototype=e.widget.extend(r,{widgetEventPrefix:a?r.widgetEventPrefix||t:t},h,{constructor:o,namespace:l,widgetName:t,widgetFullName:n}),a?(e.each(a._childConstructors,function(t,i){var s=i.prototype;e.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete a._childConstructors):i._childConstructors.push(o),e.widget.bridge(t,o),o},e.widget.extend=function(t){for(var i,s,n=u.call(arguments,1),a=0,o=n.length;o>a;a++)for(i in 
n[a])s=n[a][i],n[a].hasOwnProperty(i)&&void 0!==s&&(t[i]=e.isPlainObject(s)?e.isPlainObject(t[i])?e.widget.extend({},t[i],s):e.widget.extend({},s):s);return t},e.widget.bridge=function(t,i){var s=i.prototype.widgetFullName||t;e.fn[t]=function(n){var a="string"==typeof n,o=u.call(arguments,1),r=this;return a?this.each(function(){var i,a=e.data(this,s);return"instance"===n?(r=a,!1):a?e.isFunction(a[n])&&"_"!==n.charAt(0)?(i=a[n].apply(a,o),i!==a&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):e.error("no such method '"+n+"' for "+t+" widget instance"):e.error("cannot call methods on "+t+" prior to initialization; "+"attempted to call method '"+n+"'")}):(o.length&&(n=e.widget.extend.apply(null,[n].concat(o))),this.each(function(){var t=e.data(this,s);t?(t.option(n||{}),t._init&&t._init()):e.data(this,s,new i(n,this))})),r}},e.Widget=function(){},e.Widget._childConstructors=[],e.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
    ",options:{disabled:!1,create:null},_createWidget:function(t,i){i=e(i||this.defaultElement||this)[0],this.element=e(i),this.uuid=l++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=e(),this.hoverable=e(),this.focusable=e(),i!==this&&(e.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(e){e.target===i&&this.destroy()}}),this.document=e(i.style?i.ownerDocument:i.document||i),this.window=e(this.document[0].defaultView||this.document[0].parentWindow)),this.options=e.widget.extend({},this.options,this._getCreateOptions(),t),this._create(),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:e.noop,_getCreateEventData:e.noop,_create:e.noop,_init:e.noop,destroy:function(){this._destroy(),this.element.unbind(this.eventNamespace).removeData(this.widgetFullName).removeData(e.camelCase(this.widgetFullName)),this.widget().unbind(this.eventNamespace).removeAttr("aria-disabled").removeClass(this.widgetFullName+"-disabled "+"ui-state-disabled"),this.bindings.unbind(this.eventNamespace),this.hoverable.removeClass("ui-state-hover"),this.focusable.removeClass("ui-state-focus")},_destroy:e.noop,widget:function(){return this.element},option:function(t,i){var s,n,a,o=t;if(0===arguments.length)return e.widget.extend({},this.options);if("string"==typeof t)if(o={},s=t.split("."),t=s.shift(),s.length){for(n=o[t]=e.widget.extend({},this.options[t]),a=0;s.length-1>a;a++)n[s[a]]=n[s[a]]||{},n=n[s[a]];if(t=s.pop(),1===arguments.length)return void 0===n[t]?null:n[t];n[t]=i}else{if(1===arguments.length)return void 0===this.options[t]?null:this.options[t];o[t]=i}return this._setOptions(o),this},_setOptions:function(e){var t;for(t in e)this._setOption(t,e[t]);return this},_setOption:function(e,t){return 
this.options[e]=t,"disabled"===e&&(this.widget().toggleClass(this.widgetFullName+"-disabled",!!t),t&&(this.hoverable.removeClass("ui-state-hover"),this.focusable.removeClass("ui-state-focus"))),this},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_on:function(t,i,s){var n,a=this;"boolean"!=typeof t&&(s=i,i=t,t=!1),s?(i=n=e(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),e.each(s,function(s,o){function r(){return t||a.options.disabled!==!0&&!e(this).hasClass("ui-state-disabled")?("string"==typeof o?a[o]:o).apply(a,arguments):void 0}"string"!=typeof o&&(r.guid=o.guid=o.guid||r.guid||e.guid++);var h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+a.eventNamespace,u=h[2];u?n.delegate(u,l,r):i.bind(l,r)})},_off:function(t,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,t.unbind(i).undelegate(i),this.bindings=e(this.bindings.not(t).get()),this.focusable=e(this.focusable.not(t).get()),this.hoverable=e(this.hoverable.not(t).get())},_delay:function(e,t){function i(){return("string"==typeof e?s[e]:e).apply(s,arguments)}var s=this;return setTimeout(i,t||0)},_hoverable:function(t){this.hoverable=this.hoverable.add(t),this._on(t,{mouseenter:function(t){e(t.currentTarget).addClass("ui-state-hover")},mouseleave:function(t){e(t.currentTarget).removeClass("ui-state-hover")}})},_focusable:function(t){this.focusable=this.focusable.add(t),this._on(t,{focusin:function(t){e(t.currentTarget).addClass("ui-state-focus")},focusout:function(t){e(t.currentTarget).removeClass("ui-state-focus")}})},_trigger:function(t,i,s){var n,a,o=this.options[t];if(s=s||{},i=e.Event(i),i.type=(t===this.widgetEventPrefix?t:this.widgetEventPrefix+t).toLowerCase(),i.target=this.element[0],a=i.originalEvent)for(n in a)n in i||(i[n]=a[n]);return 
this.element.trigger(i,s),!(e.isFunction(o)&&o.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},e.each({show:"fadeIn",hide:"fadeOut"},function(t,i){e.Widget.prototype["_"+t]=function(s,n,a){"string"==typeof n&&(n={effect:n});var o,r=n?n===!0||"number"==typeof n?i:n.effect||i:t;n=n||{},"number"==typeof n&&(n={duration:n}),o=!e.isEmptyObject(n),n.complete=a,n.delay&&s.delay(n.delay),o&&e.effects&&e.effects.effect[r]?s[t](n):r!==t&&s[r]?s[r](n.duration,n.easing,a):s.queue(function(i){e(this)[t](),a&&a.call(s[0]),i()})}}),e.widget;var d=!1;e(document).mouseup(function(){d=!1}),e.widget("ui.mouse",{version:"1.11.4",options:{cancel:"input,textarea,button,select,option",distance:1,delay:0},_mouseInit:function(){var t=this;this.element.bind("mousedown."+this.widgetName,function(e){return t._mouseDown(e)}).bind("click."+this.widgetName,function(i){return!0===e.data(i.target,t.widgetName+".preventClickEvent")?(e.removeData(i.target,t.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 0}),this.started=!1},_mouseDestroy:function(){this.element.unbind("."+this.widgetName),this._mouseMoveDelegate&&this.document.unbind("mousemove."+this.widgetName,this._mouseMoveDelegate).unbind("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(t){if(!d){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(t),this._mouseDownEvent=t;var i=this,s=1===t.which,n="string"==typeof this.options.cancel&&t.target.nodeName?e(t.target).closest(this.options.cancel).length:!1;return 
s&&!n&&this._mouseCapture(t)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(t)&&this._mouseDelayMet(t)&&(this._mouseStarted=this._mouseStart(t)!==!1,!this._mouseStarted)?(t.preventDefault(),!0):(!0===e.data(t.target,this.widgetName+".preventClickEvent")&&e.removeData(t.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(e){return i._mouseMove(e)},this._mouseUpDelegate=function(e){return i._mouseUp(e)},this.document.bind("mousemove."+this.widgetName,this._mouseMoveDelegate).bind("mouseup."+this.widgetName,this._mouseUpDelegate),t.preventDefault(),d=!0,!0)):!0}},_mouseMove:function(t){if(this._mouseMoved){if(e.ui.ie&&(!document.documentMode||9>document.documentMode)&&!t.button)return this._mouseUp(t);if(!t.which)return this._mouseUp(t)}return(t.which||t.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(t),t.preventDefault()):(this._mouseDistanceMet(t)&&this._mouseDelayMet(t)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,t)!==!1,this._mouseStarted?this._mouseDrag(t):this._mouseUp(t)),!this._mouseStarted)},_mouseUp:function(t){return this.document.unbind("mousemove."+this.widgetName,this._mouseMoveDelegate).unbind("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,t.target===this._mouseDownEvent.target&&e.data(t.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(t)),d=!1,!1},_mouseDistanceMet:function(e){return Math.max(Math.abs(this._mouseDownEvent.pageX-e.pageX),Math.abs(this._mouseDownEvent.pageY-e.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),function(){function t(e,t,i){return[parseFloat(e[0])*(p.test(e[0])?t/100:1),parseFloat(e[1])*(p.test(e[1])?i/100:1)]}function i(t,i){return 
parseInt(e.css(t,i),10)||0}function s(t){var i=t[0];return 9===i.nodeType?{width:t.width(),height:t.height(),offset:{top:0,left:0}}:e.isWindow(i)?{width:t.width(),height:t.height(),offset:{top:t.scrollTop(),left:t.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:t.outerWidth(),height:t.outerHeight(),offset:t.offset()}}e.ui=e.ui||{};var n,a,o=Math.max,r=Math.abs,h=Math.round,l=/left|center|right/,u=/top|center|bottom/,d=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,p=/%$/,f=e.fn.position;e.position={scrollbarWidth:function(){if(void 0!==n)return n;var t,i,s=e("
    "),a=s.children()[0];return e("body").append(s),t=a.offsetWidth,s.css("overflow","scroll"),i=a.offsetWidth,t===i&&(i=s[0].clientWidth),s.remove(),n=t-i},getScrollInfo:function(t){var i=t.isWindow||t.isDocument?"":t.element.css("overflow-x"),s=t.isWindow||t.isDocument?"":t.element.css("overflow-y"),n="scroll"===i||"auto"===i&&t.widthi?"left":t>0?"right":"center",vertical:0>a?"top":s>0?"bottom":"middle"};d>m&&m>r(t+i)&&(h.horizontal="center"),c>g&&g>r(s+a)&&(h.vertical="middle"),h.important=o(r(t),r(i))>o(r(s),r(a))?"horizontal":"vertical",n.using.call(this,e,h)}),u.offset(e.extend(M,{using:l}))})},e.ui.position={fit:{left:function(e,t){var i,s=t.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=e.left-t.collisionPosition.marginLeft,h=n-r,l=r+t.collisionWidth-a-n;t.collisionWidth>a?h>0&&0>=l?(i=e.left+h+t.collisionWidth-a-n,e.left+=h-i):e.left=l>0&&0>=h?n:h>l?n+a-t.collisionWidth:n:h>0?e.left+=h:l>0?e.left-=l:e.left=o(e.left-r,e.left)},top:function(e,t){var i,s=t.within,n=s.isWindow?s.scrollTop:s.offset.top,a=t.within.height,r=e.top-t.collisionPosition.marginTop,h=n-r,l=r+t.collisionHeight-a-n;t.collisionHeight>a?h>0&&0>=l?(i=e.top+h+t.collisionHeight-a-n,e.top+=h-i):e.top=l>0&&0>=h?n:h>l?n+a-t.collisionHeight:n:h>0?e.top+=h:l>0?e.top-=l:e.top=o(e.top-r,e.top)}},flip:{left:function(e,t){var i,s,n=t.within,a=n.offset.left+n.scrollLeft,o=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=e.left-t.collisionPosition.marginLeft,u=l-h,d=l+t.collisionWidth-o-h,c="left"===t.my[0]?-t.elemWidth:"right"===t.my[0]?t.elemWidth:0,p="left"===t.at[0]?t.targetWidth:"right"===t.at[0]?-t.targetWidth:0,f=-2*t.offset[0];0>u?(i=e.left+c+p+f+t.collisionWidth-o-a,(0>i||r(u)>i)&&(e.left+=c+p+f)):d>0&&(s=e.left-t.collisionPosition.marginLeft+c+p+f-h,(s>0||d>r(s))&&(e.left+=c+p+f))},top:function(e,t){var 
i,s,n=t.within,a=n.offset.top+n.scrollTop,o=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=e.top-t.collisionPosition.marginTop,u=l-h,d=l+t.collisionHeight-o-h,c="top"===t.my[1],p=c?-t.elemHeight:"bottom"===t.my[1]?t.elemHeight:0,f="top"===t.at[1]?t.targetHeight:"bottom"===t.at[1]?-t.targetHeight:0,m=-2*t.offset[1];0>u?(s=e.top+p+f+m+t.collisionHeight-o-a,(0>s||r(u)>s)&&(e.top+=p+f+m)):d>0&&(i=e.top-t.collisionPosition.marginTop+p+f+m-h,(i>0||d>r(i))&&(e.top+=p+f+m))}},flipfit:{left:function(){e.ui.position.flip.left.apply(this,arguments),e.ui.position.fit.left.apply(this,arguments)},top:function(){e.ui.position.flip.top.apply(this,arguments),e.ui.position.fit.top.apply(this,arguments)}}},function(){var t,i,s,n,o,r=document.getElementsByTagName("body")[0],h=document.createElement("div");t=document.createElement(r?"div":"body"),s={visibility:"hidden",width:0,height:0,border:0,margin:0,background:"none"},r&&e.extend(s,{position:"absolute",left:"-1000px",top:"-1000px"});for(o in s)t.style[o]=s[o];t.appendChild(h),i=r||document.documentElement,i.insertBefore(t,i.firstChild),h.style.cssText="position: absolute; left: 10.7432222px;",n=e(h).offset().left,a=n>10&&11>n,t.innerHTML="",i.removeChild(t)}()}(),e.ui.position,e.widget("ui.accordion",{version:"1.11.4",options:{active:0,animate:{},collapsible:!1,event:"click",header:"> li > :first-child,> :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var t=this.options;this.prevShow=this.prevHide=e(),this.element.addClass("ui-accordion ui-widget 
ui-helper-reset").attr("role","tablist"),t.collapsible||t.active!==!1&&null!=t.active||(t.active=0),this._processPanels(),0>t.active&&(t.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():e()}},_createIcons:function(){var t=this.options.icons;t&&(e("").addClass("ui-accordion-header-icon ui-icon "+t.header).prependTo(this.headers),this.active.children(".ui-accordion-header-icon").removeClass(t.header).addClass(t.activeHeader),this.headers.addClass("ui-accordion-icons"))},_destroyIcons:function(){this.headers.removeClass("ui-accordion-icons").children(".ui-accordion-header-icon").remove()},_destroy:function(){var e;this.element.removeClass("ui-accordion ui-widget ui-helper-reset").removeAttr("role"),this.headers.removeClass("ui-accordion-header ui-accordion-header-active ui-state-default ui-corner-all ui-state-active ui-state-disabled ui-corner-top").removeAttr("role").removeAttr("aria-expanded").removeAttr("aria-selected").removeAttr("aria-controls").removeAttr("tabIndex").removeUniqueId(),this._destroyIcons(),e=this.headers.next().removeClass("ui-helper-reset ui-widget-content ui-corner-bottom ui-accordion-content ui-accordion-content-active ui-state-disabled").css("display","").removeAttr("role").removeAttr("aria-hidden").removeAttr("aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&e.css("height","")},_setOption:function(e,t){return"active"===e?(this._activate(t),void 0):("event"===e&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(t)),this._super(e,t),"collapsible"!==e||t||this.options.active!==!1||this._activate(0),"icons"===e&&(this._destroyIcons(),t&&this._createIcons()),"disabled"===e&&(this.element.toggleClass("ui-state-disabled",!!t).attr("aria-disabled",t),this.headers.add(this.headers.next()).toggleClass("ui-state-disabled",!!t)),void 0)},_keydown:function(t){if(!t.altKey&&!t.ctrlKey){var 
i=e.ui.keyCode,s=this.headers.length,n=this.headers.index(t.target),a=!1;switch(t.keyCode){case i.RIGHT:case i.DOWN:a=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:a=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(t);break;case i.HOME:a=this.headers[0];break;case i.END:a=this.headers[s-1]}a&&(e(t.target).attr("tabIndex",-1),e(a).attr("tabIndex",0),a.focus(),t.preventDefault())}},_panelKeyDown:function(t){t.keyCode===e.ui.keyCode.UP&&t.ctrlKey&&e(t.currentTarget).prev().focus()},refresh:function(){var t=this.options;this._processPanels(),t.active===!1&&t.collapsible===!0||!this.headers.length?(t.active=!1,this.active=e()):t.active===!1?this._activate(0):this.active.length&&!e.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(t.active=!1,this.active=e()):this._activate(Math.max(0,t.active-1)):t.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var e=this.headers,t=this.panels;this.headers=this.element.find(this.options.header).addClass("ui-accordion-header ui-state-default ui-corner-all"),this.panels=this.headers.next().addClass("ui-accordion-content ui-helper-reset ui-widget-content ui-corner-bottom").filter(":not(.ui-accordion-content-active)").hide(),t&&(this._off(e.not(this.headers)),this._off(t.not(this.panels)))},_refresh:function(){var t,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active).addClass("ui-accordion-header-active ui-state-active ui-corner-top").removeClass("ui-corner-all"),this.active.next().addClass("ui-accordion-content-active").show(),this.headers.attr("role","tab").each(function(){var 
t=e(this),i=t.uniqueId().attr("id"),s=t.next(),n=s.uniqueId().attr("id");t.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(t=n.height(),this.element.siblings(":visible").each(function(){var i=e(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(t-=i.outerHeight(!0))}),this.headers.each(function(){t-=e(this).outerHeight(!0)}),this.headers.next().each(function(){e(this).height(Math.max(0,t-e(this).innerHeight()+e(this).height()))}).css("overflow","auto")):"auto"===s&&(t=0,this.headers.next().each(function(){t=Math.max(t,e(this).css("height","").height())}).height(t))},_activate:function(t){var i=this._findActive(t)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:e.noop}))},_findActive:function(t){return"number"==typeof t?this.headers.eq(t):e()},_setupEvents:function(t){var i={keydown:"_keydown"};t&&e.each(t.split(" "),function(e,t){i[t]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(t){var i=this.options,s=this.active,n=e(t.currentTarget),a=n[0]===s[0],o=a&&i.collapsible,r=o?e():n.next(),h=s.next(),l={oldHeader:s,oldPanel:h,newHeader:o?e():n,newPanel:r};t.preventDefault(),a&&!i.collapsible||this._trigger("beforeActivate",t,l)===!1||(i.active=o?!1:this.headers.index(n),this.active=a?e():n,this._toggle(l),s.removeClass("ui-accordion-header-active 
ui-state-active"),i.icons&&s.children(".ui-accordion-header-icon").removeClass(i.icons.activeHeader).addClass(i.icons.header),a||(n.removeClass("ui-corner-all").addClass("ui-accordion-header-active ui-state-active ui-corner-top"),i.icons&&n.children(".ui-accordion-header-icon").removeClass(i.icons.header).addClass(i.icons.activeHeader),n.next().addClass("ui-accordion-content-active")))},_toggle:function(t){var i=t.newPanel,s=this.prevShow.length?this.prevShow:t.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,t):(s.hide(),i.show(),this._toggleComplete(t)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(e(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(e,t,i){var s,n,a,o=this,r=0,h=e.css("box-sizing"),l=e.length&&(!t.length||e.index()",delay:300,options:{icons:{submenu:"ui-icon-carat-1-e"},items:"> *",menus:"ul",position:{my:"left-1 top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().addClass("ui-menu ui-widget ui-widget-content").toggleClass("ui-menu-icons",!!this.element.find(".ui-icon").length).attr({role:this.options.role,tabIndex:0}),this.options.disabled&&this.element.addClass("ui-state-disabled").attr("aria-disabled","true"),this._on({"mousedown .ui-menu-item":function(e){e.preventDefault()},"click .ui-menu-item":function(t){var 
i=e(t.target);!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(t),t.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(t):!this.element.is(":focus")&&e(this.document[0].activeElement).closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(t){if(!this.previousFilter){var i=e(t.currentTarget); +i.siblings(".ui-state-active").removeClass("ui-state-active"),this.focus(t,i)}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(e,t){var i=this.active||this.element.find(this.options.items).eq(0);t||this.focus(e,i)},blur:function(t){this._delay(function(){e.contains(this.element[0],this.document[0].activeElement)||this.collapseAll(t)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(e){this._closeOnDocumentClick(e)&&this.collapseAll(e),this.mouseHandled=!1}})},_destroy:function(){this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeClass("ui-menu ui-widget ui-widget-content ui-menu-icons ui-front").removeAttr("role").removeAttr("tabIndex").removeAttr("aria-labelledby").removeAttr("aria-expanded").removeAttr("aria-hidden").removeAttr("aria-disabled").removeUniqueId().show(),this.element.find(".ui-menu-item").removeClass("ui-menu-item").removeAttr("role").removeAttr("aria-disabled").removeUniqueId().removeClass("ui-state-hover").removeAttr("tabIndex").removeAttr("role").removeAttr("aria-haspopup").children().each(function(){var t=e(this);t.data("ui-menu-submenu-carat")&&t.remove()}),this.element.find(".ui-menu-divider").removeClass("ui-menu-divider ui-widget-content")},_keydown:function(t){var i,s,n,a,o=!0;switch(t.keyCode){case e.ui.keyCode.PAGE_UP:this.previousPage(t);break;case e.ui.keyCode.PAGE_DOWN:this.nextPage(t);break;case e.ui.keyCode.HOME:this._move("first","first",t);break;case 
e.ui.keyCode.END:this._move("last","last",t);break;case e.ui.keyCode.UP:this.previous(t);break;case e.ui.keyCode.DOWN:this.next(t);break;case e.ui.keyCode.LEFT:this.collapse(t);break;case e.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(t);break;case e.ui.keyCode.ENTER:case e.ui.keyCode.SPACE:this._activate(t);break;case e.ui.keyCode.ESCAPE:this.collapse(t);break;default:o=!1,s=this.previousFilter||"",n=String.fromCharCode(t.keyCode),a=!1,clearTimeout(this.filterTimer),n===s?a=!0:n=s+n,i=this._filterMenuItems(n),i=a&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(t.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(t,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}o&&t.preventDefault()},_activate:function(e){this.active.is(".ui-state-disabled")||(this.active.is("[aria-haspopup='true']")?this.expand(e):this.select(e))},refresh:function(){var t,i,s=this,n=this.options.icons.submenu,a=this.element.find(this.options.menus);this.element.toggleClass("ui-menu-icons",!!this.element.find(".ui-icon").length),a.filter(":not(.ui-menu)").addClass("ui-menu ui-widget ui-widget-content ui-front").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var t=e(this),i=t.parent(),s=e("").addClass("ui-menu-icon ui-icon "+n).data("ui-menu-submenu-carat",!0);i.attr("aria-haspopup","true").prepend(s),t.attr("aria-labelledby",i.attr("id"))}),t=a.add(this.element),i=t.find(this.options.items),i.not(".ui-menu-item").each(function(){var t=e(this);s._isDivider(t)&&t.addClass("ui-widget-content ui-menu-divider")}),i.not(".ui-menu-item, 
.ui-menu-divider").addClass("ui-menu-item").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!e.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(e,t){"icons"===e&&this.element.find(".ui-menu-icon").removeClass(this.options.icons.submenu).addClass(t.submenu),"disabled"===e&&this.element.toggleClass("ui-state-disabled",!!t).attr("aria-disabled",t),this._super(e,t)},focus:function(e,t){var i,s;this.blur(e,e&&"focus"===e.type),this._scrollIntoView(t),this.active=t.first(),s=this.active.addClass("ui-state-focus").removeClass("ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),this.active.parent().closest(".ui-menu-item").addClass("ui-state-active"),e&&"keydown"===e.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=t.children(".ui-menu"),i.length&&e&&/^mouse/.test(e.type)&&this._startOpening(i),this.activeMenu=t.parent(),this._trigger("focus",e,{item:t})},_scrollIntoView:function(t){var i,s,n,a,o,r;this._hasScroll()&&(i=parseFloat(e.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(e.css(this.activeMenu[0],"paddingTop"))||0,n=t.offset().top-this.activeMenu.offset().top-i-s,a=this.activeMenu.scrollTop(),o=this.activeMenu.height(),r=t.outerHeight(),0>n?this.activeMenu.scrollTop(a+n):n+r>o&&this.activeMenu.scrollTop(a+n-o+r))},blur:function(e,t){t||clearTimeout(this.timer),this.active&&(this.active.removeClass("ui-state-focus"),this.active=null,this._trigger("blur",e,{item:this.active}))},_startOpening:function(e){clearTimeout(this.timer),"true"===e.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(e)},this.delay))},_open:function(t){var 
i=e.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(t.parents(".ui-menu")).hide().attr("aria-hidden","true"),t.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(t,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:e(t&&t.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(t),this.activeMenu=s},this.delay)},_close:function(e){e||(e=this.active?this.active.parent():this.element),e.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false").end().find(".ui-state-active").not(".ui-state-focus").removeClass("ui-state-active")},_closeOnDocumentClick:function(t){return!e(t.target).closest(".ui-menu").length},_isDivider:function(e){return!/[^\-\u2014\u2013\s]/.test(e.text())},collapse:function(e){var t=this.active&&this.active.parent().closest(".ui-menu-item",this.element);t&&t.length&&(this._close(),this.focus(e,t))},expand:function(e){var t=this.active&&this.active.children(".ui-menu ").find(this.options.items).first();t&&t.length&&(this._open(t.parent()),this._delay(function(){this.focus(e,t)}))},next:function(e){this._move("next","first",e)},previous:function(e){this._move("prev","last",e)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(e,t,i){var s;this.active&&(s="first"===e||"last"===e?this.active["first"===e?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[e+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[t]()),this.focus(i,s)},nextPage:function(t){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return 
i=e(this),0>i.offset().top-s-n}),this.focus(t,i)):this.focus(t,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(t),void 0)},previousPage:function(t){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=e(this),i.offset().top-s+n>0}),this.focus(t,i)):this.focus(t,this.activeMenu.find(this.options.items).first())),void 0):(this.next(t),void 0)},_hasScroll:function(){return this.element.outerHeight()",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var t,i,s,n=this.element[0].nodeName.toLowerCase(),a="textarea"===n,o="input"===n;this.isMultiLine=a?!0:o?!1:this.element.prop("isContentEditable"),this.valueMethod=this.element[a||o?"val":"text"],this.isNewMenu=!0,this.element.addClass("ui-autocomplete-input").attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return t=!0,s=!0,i=!0,void 0;t=!1,s=!1,i=!1;var a=e.ui.keyCode;switch(n.keyCode){case a.PAGE_UP:t=!0,this._move("previousPage",n);break;case a.PAGE_DOWN:t=!0,this._move("nextPage",n);break;case a.UP:t=!0,this._keyEvent("previous",n);break;case a.DOWN:t=!0,this._keyEvent("next",n);break;case a.ENTER:this.menu.active&&(t=!0,n.preventDefault(),this.menu.select(n));break;case a.TAB:this.menu.active&&this.menu.select(n);break;case a.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(t)return t=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=e.ui.keyCode;switch(s.keyCode){case 
n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(e){return s?(s=!1,e.preventDefault(),void 0):(this._searchTimeout(e),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(e){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(e),this._change(e),void 0)}}),this._initSource(),this.menu=e("
      ").addClass("ui-autocomplete ui-front").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._on(this.menu.element,{mousedown:function(t){t.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur});var i=this.menu.element[0];e(t.target).closest(".ui-menu-item").length||this._delay(function(){var t=this;this.document.one("mousedown",function(s){s.target===t.element[0]||s.target===i||e.contains(i,s.target)||t.close()})})},menufocus:function(t,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,t.originalEvent&&/^mouse/.test(t.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){e(t.target).trigger(t.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",t,{item:n})&&t.originalEvent&&/^key/.test(t.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&e.trim(s).length&&(this.liveRegion.children().hide(),e("
      ").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,t){var i=t.item.data("ui-autocomplete-item"),s=this.previous;this.element[0]!==this.document[0].activeElement&&(this.element.focus(),this.previous=s,this._delay(function(){this.previous=s,this.selectedItem=i})),!1!==this._trigger("select",e,{item:i})&&this._value(i.value),this.term=this._value(),this.close(e),this.selectedItem=i}}),this.liveRegion=e("",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).addClass("ui-helper-hidden-accessible").appendTo(this.document[0].body),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeClass("ui-autocomplete-input").removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(e,t){this._super(e,t),"source"===e&&this._initSource(),"appendTo"===e&&this.menu.element.appendTo(this._appendTo()),"disabled"===e&&t&&this.xhr&&this.xhr.abort()},_appendTo:function(){var t=this.options.appendTo;return t&&(t=t.jquery||t.nodeType?e(t):this.document.find(t).eq(0)),t&&t[0]||(t=this.element.closest(".ui-front")),t.length||(t=this.document[0].body),t},_initSource:function(){var t,i,s=this;e.isArray(this.options.source)?(t=this.options.source,this.source=function(i,s){s(e.ui.autocomplete.filter(t,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(t,n){s.xhr&&s.xhr.abort(),s.xhr=e.ajax({url:i,data:t,dataType:"json",success:function(e){n(e)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(e){clearTimeout(this.searching),this.searching=this._delay(function(){var t=this.term===this._value(),i=this.menu.element.is(":visible"),s=e.altKey||e.ctrlKey||e.metaKey||e.shiftKey;(!t||t&&!i&&!s)&&(this.selectedItem=null,this.search(null,e))},this.options.delay)},search:function(e,t){return 
e=null!=e?e:this._value(),this.term=this._value(),e.length").text(i.label).appendTo(t)},_move:function(e,t){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(e)||this.menu.isLastItem()&&/^next/.test(e)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[e](t),void 0):(this.search(null,t),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(e,t){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(e,t),t.preventDefault())}}),e.extend(e.ui.autocomplete,{escapeRegex:function(e){return e.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(t,i){var s=RegExp(e.ui.autocomplete.escapeRegex(i),"i");return e.grep(t,function(e){return s.test(e.label||e.value||e)})}}),e.widget("ui.autocomplete",e.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(e){return e+(e>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(t){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=t&&t.length?this.options.messages.results(t.length):this.options.messages.noResults,this.liveRegion.children().hide(),e("
      ").text(i).appendTo(this.liveRegion))}}),e.ui.autocomplete;var c,p="ui-button ui-widget ui-state-default ui-corner-all",f="ui-button-icons-only ui-button-icon-only ui-button-text-icons ui-button-text-icon-primary ui-button-text-icon-secondary ui-button-text-only",m=function(){var t=e(this);setTimeout(function(){t.find(":ui-button").button("refresh")},1)},g=function(t){var i=t.name,s=t.form,n=e([]);return i&&(i=i.replace(/'/g,"\\'"),n=s?e(s).find("[name='"+i+"'][type=radio]"):e("[name='"+i+"'][type=radio]",t.ownerDocument).filter(function(){return!this.form})),n};e.widget("ui.button",{version:"1.11.4",defaultElement:"").addClass(this._triggerClass).html(a?e("").attr({src:a,alt:n,title:n}):n)),t[r?"before":"after"](i.trigger),i.trigger.click(function(){return e.datepicker._datepickerShowing&&e.datepicker._lastInput===t[0]?e.datepicker._hideDatepicker():e.datepicker._datepickerShowing&&e.datepicker._lastInput!==t[0]?(e.datepicker._hideDatepicker(),e.datepicker._showDatepicker(t[0])):e.datepicker._showDatepicker(t[0]),!1}))},_autoSize:function(e){if(this._get(e,"autoSize")&&!e.inline){var t,i,s,n,a=new Date(2009,11,20),o=this._get(e,"dateFormat");o.match(/[DM]/)&&(t=function(e){for(i=0,s=0,n=0;e.length>n;n++)e[n].length>i&&(i=e[n].length,s=n);return s},a.setMonth(t(this._get(e,o.match(/MM/)?"monthNames":"monthNamesShort"))),a.setDate(t(this._get(e,o.match(/DD/)?"dayNames":"dayNamesShort"))+20-a.getDay())),e.input.attr("size",this._formatDate(e,a).length)}},_inlineDatepicker:function(t,i){var s=e(t);s.hasClass(this.markerClassName)||(s.addClass(this.markerClassName).append(i.dpDiv),e.data(t,"datepicker",i),this._setDate(i,this._getDefaultDate(i),!0),this._updateDatepicker(i),this._updateAlternate(i),i.settings.disabled&&this._disableDatepicker(t),i.dpDiv.css("display","block"))},_dialogDatepicker:function(t,i,s,n,a){var o,h,l,u,d,c=this._dialogInst;return 
c||(this.uuid+=1,o="dp"+this.uuid,this._dialogInput=e(""),this._dialogInput.keydown(this._doKeyDown),e("body").append(this._dialogInput),c=this._dialogInst=this._newInst(this._dialogInput,!1),c.settings={},e.data(this._dialogInput[0],"datepicker",c)),r(c.settings,n||{}),i=i&&i.constructor===Date?this._formatDate(c,i):i,this._dialogInput.val(i),this._pos=a?a.length?a:[a.pageX,a.pageY]:null,this._pos||(h=document.documentElement.clientWidth,l=document.documentElement.clientHeight,u=document.documentElement.scrollLeft||document.body.scrollLeft,d=document.documentElement.scrollTop||document.body.scrollTop,this._pos=[h/2-100+u,l/2-150+d]),this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px"),c.settings.onSelect=s,this._inDialog=!0,this.dpDiv.addClass(this._dialogClass),this._showDatepicker(this._dialogInput[0]),e.blockUI&&e.blockUI(this.dpDiv),e.data(this._dialogInput[0],"datepicker",c),this},_destroyDatepicker:function(t){var i,s=e(t),n=e.data(t,"datepicker");s.hasClass(this.markerClassName)&&(i=t.nodeName.toLowerCase(),e.removeData(t,"datepicker"),"input"===i?(n.append.remove(),n.trigger.remove(),s.removeClass(this.markerClassName).unbind("focus",this._showDatepicker).unbind("keydown",this._doKeyDown).unbind("keypress",this._doKeyPress).unbind("keyup",this._doKeyUp)):("div"===i||"span"===i)&&s.removeClass(this.markerClassName).empty(),v===n&&(v=null))},_enableDatepicker:function(t){var i,s,n=e(t),a=e.data(t,"datepicker");n.hasClass(this.markerClassName)&&(i=t.nodeName.toLowerCase(),"input"===i?(t.disabled=!1,a.trigger.filter("button").each(function(){this.disabled=!1}).end().filter("img").css({opacity:"1.0",cursor:""})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().removeClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!1)),this._disabledInputs=e.map(this._disabledInputs,function(e){return e===t?null:e}))},_disableDatepicker:function(t){var 
i,s,n=e(t),a=e.data(t,"datepicker");n.hasClass(this.markerClassName)&&(i=t.nodeName.toLowerCase(),"input"===i?(t.disabled=!0,a.trigger.filter("button").each(function(){this.disabled=!0}).end().filter("img").css({opacity:"0.5",cursor:"default"})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().addClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!0)),this._disabledInputs=e.map(this._disabledInputs,function(e){return e===t?null:e}),this._disabledInputs[this._disabledInputs.length]=t)},_isDisabledDatepicker:function(e){if(!e)return!1;for(var t=0;this._disabledInputs.length>t;t++)if(this._disabledInputs[t]===e)return!0;return!1},_getInst:function(t){try{return e.data(t,"datepicker")}catch(i){throw"Missing instance data for this datepicker"}},_optionDatepicker:function(t,i,s){var n,a,o,h,l=this._getInst(t);return 2===arguments.length&&"string"==typeof i?"defaults"===i?e.extend({},e.datepicker._defaults):l?"all"===i?e.extend({},l.settings):this._get(l,i):null:(n=i||{},"string"==typeof i&&(n={},n[i]=s),l&&(this._curInst===l&&this._hideDatepicker(),a=this._getDateDatepicker(t,!0),o=this._getMinMaxDate(l,"min"),h=this._getMinMaxDate(l,"max"),r(l.settings,n),null!==o&&void 0!==n.dateFormat&&void 0===n.minDate&&(l.settings.minDate=this._formatDate(l,o)),null!==h&&void 0!==n.dateFormat&&void 0===n.maxDate&&(l.settings.maxDate=this._formatDate(l,h)),"disabled"in n&&(n.disabled?this._disableDatepicker(t):this._enableDatepicker(t)),this._attachments(e(t),l),this._autoSize(l),this._setDate(l,a),this._updateAlternate(l),this._updateDatepicker(l)),void 0)},_changeDatepicker:function(e,t,i){this._optionDatepicker(e,t,i)},_refreshDatepicker:function(e){var t=this._getInst(e);t&&this._updateDatepicker(t)},_setDateDatepicker:function(e,t){var i=this._getInst(e);i&&(this._setDate(i,t),this._updateDatepicker(i),this._updateAlternate(i))},_getDateDatepicker:function(e,t){var i=this._getInst(e);return 
i&&!i.inline&&this._setDateFromField(i,t),i?this._getDate(i):null},_doKeyDown:function(t){var i,s,n,a=e.datepicker._getInst(t.target),o=!0,r=a.dpDiv.is(".ui-datepicker-rtl");if(a._keyEvent=!0,e.datepicker._datepickerShowing)switch(t.keyCode){case 9:e.datepicker._hideDatepicker(),o=!1;break;case 13:return n=e("td."+e.datepicker._dayOverClass+":not(."+e.datepicker._currentClass+")",a.dpDiv),n[0]&&e.datepicker._selectDay(t.target,a.selectedMonth,a.selectedYear,n[0]),i=e.datepicker._get(a,"onSelect"),i?(s=e.datepicker._formatDate(a),i.apply(a.input?a.input[0]:null,[s,a])):e.datepicker._hideDatepicker(),!1;case 27:e.datepicker._hideDatepicker();break;case 33:e.datepicker._adjustDate(t.target,t.ctrlKey?-e.datepicker._get(a,"stepBigMonths"):-e.datepicker._get(a,"stepMonths"),"M");break;case 34:e.datepicker._adjustDate(t.target,t.ctrlKey?+e.datepicker._get(a,"stepBigMonths"):+e.datepicker._get(a,"stepMonths"),"M");break;case 35:(t.ctrlKey||t.metaKey)&&e.datepicker._clearDate(t.target),o=t.ctrlKey||t.metaKey;break;case 36:(t.ctrlKey||t.metaKey)&&e.datepicker._gotoToday(t.target),o=t.ctrlKey||t.metaKey;break;case 37:(t.ctrlKey||t.metaKey)&&e.datepicker._adjustDate(t.target,r?1:-1,"D"),o=t.ctrlKey||t.metaKey,t.originalEvent.altKey&&e.datepicker._adjustDate(t.target,t.ctrlKey?-e.datepicker._get(a,"stepBigMonths"):-e.datepicker._get(a,"stepMonths"),"M");break;case 38:(t.ctrlKey||t.metaKey)&&e.datepicker._adjustDate(t.target,-7,"D"),o=t.ctrlKey||t.metaKey;break;case 39:(t.ctrlKey||t.metaKey)&&e.datepicker._adjustDate(t.target,r?-1:1,"D"),o=t.ctrlKey||t.metaKey,t.originalEvent.altKey&&e.datepicker._adjustDate(t.target,t.ctrlKey?+e.datepicker._get(a,"stepBigMonths"):+e.datepicker._get(a,"stepMonths"),"M");break;case 40:(t.ctrlKey||t.metaKey)&&e.datepicker._adjustDate(t.target,7,"D"),o=t.ctrlKey||t.metaKey;break;default:o=!1}else 36===t.keyCode&&t.ctrlKey?e.datepicker._showDatepicker(this):o=!1;o&&(t.preventDefault(),t.stopPropagation())},_doKeyPress:function(t){var 
i,s,n=e.datepicker._getInst(t.target); +return e.datepicker._get(n,"constrainInput")?(i=e.datepicker._possibleChars(e.datepicker._get(n,"dateFormat")),s=String.fromCharCode(null==t.charCode?t.keyCode:t.charCode),t.ctrlKey||t.metaKey||" ">s||!i||i.indexOf(s)>-1):void 0},_doKeyUp:function(t){var i,s=e.datepicker._getInst(t.target);if(s.input.val()!==s.lastVal)try{i=e.datepicker.parseDate(e.datepicker._get(s,"dateFormat"),s.input?s.input.val():null,e.datepicker._getFormatConfig(s)),i&&(e.datepicker._setDateFromField(s),e.datepicker._updateAlternate(s),e.datepicker._updateDatepicker(s))}catch(n){}return!0},_showDatepicker:function(t){if(t=t.target||t,"input"!==t.nodeName.toLowerCase()&&(t=e("input",t.parentNode)[0]),!e.datepicker._isDisabledDatepicker(t)&&e.datepicker._lastInput!==t){var i,n,a,o,h,l,u;i=e.datepicker._getInst(t),e.datepicker._curInst&&e.datepicker._curInst!==i&&(e.datepicker._curInst.dpDiv.stop(!0,!0),i&&e.datepicker._datepickerShowing&&e.datepicker._hideDatepicker(e.datepicker._curInst.input[0])),n=e.datepicker._get(i,"beforeShow"),a=n?n.apply(t,[t,i]):{},a!==!1&&(r(i.settings,a),i.lastVal=null,e.datepicker._lastInput=t,e.datepicker._setDateFromField(i),e.datepicker._inDialog&&(t.value=""),e.datepicker._pos||(e.datepicker._pos=e.datepicker._findPos(t),e.datepicker._pos[1]+=t.offsetHeight),o=!1,e(t).parents().each(function(){return 
o|="fixed"===e(this).css("position"),!o}),h={left:e.datepicker._pos[0],top:e.datepicker._pos[1]},e.datepicker._pos=null,i.dpDiv.empty(),i.dpDiv.css({position:"absolute",display:"block",top:"-1000px"}),e.datepicker._updateDatepicker(i),h=e.datepicker._checkOffset(i,h,o),i.dpDiv.css({position:e.datepicker._inDialog&&e.blockUI?"static":o?"fixed":"absolute",display:"none",left:h.left+"px",top:h.top+"px"}),i.inline||(l=e.datepicker._get(i,"showAnim"),u=e.datepicker._get(i,"duration"),i.dpDiv.css("z-index",s(e(t))+1),e.datepicker._datepickerShowing=!0,e.effects&&e.effects.effect[l]?i.dpDiv.show(l,e.datepicker._get(i,"showOptions"),u):i.dpDiv[l||"show"](l?u:null),e.datepicker._shouldFocusInput(i)&&i.input.focus(),e.datepicker._curInst=i))}},_updateDatepicker:function(t){this.maxRows=4,v=t,t.dpDiv.empty().append(this._generateHTML(t)),this._attachHandlers(t);var i,s=this._getNumberOfMonths(t),n=s[1],a=17,r=t.dpDiv.find("."+this._dayOverClass+" a");r.length>0&&o.apply(r.get(0)),t.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""),n>1&&t.dpDiv.addClass("ui-datepicker-multi-"+n).css("width",a*n+"em"),t.dpDiv[(1!==s[0]||1!==s[1]?"add":"remove")+"Class"]("ui-datepicker-multi"),t.dpDiv[(this._get(t,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"),t===e.datepicker._curInst&&e.datepicker._datepickerShowing&&e.datepicker._shouldFocusInput(t)&&t.input.focus(),t.yearshtml&&(i=t.yearshtml,setTimeout(function(){i===t.yearshtml&&t.yearshtml&&t.dpDiv.find("select.ui-datepicker-year:first").replaceWith(t.yearshtml),i=t.yearshtml=null},0))},_shouldFocusInput:function(e){return e.input&&e.input.is(":visible")&&!e.input.is(":disabled")&&!e.input.is(":focus")},_checkOffset:function(t,i,s){var 
n=t.dpDiv.outerWidth(),a=t.dpDiv.outerHeight(),o=t.input?t.input.outerWidth():0,r=t.input?t.input.outerHeight():0,h=document.documentElement.clientWidth+(s?0:e(document).scrollLeft()),l=document.documentElement.clientHeight+(s?0:e(document).scrollTop());return i.left-=this._get(t,"isRTL")?n-o:0,i.left-=s&&i.left===t.input.offset().left?e(document).scrollLeft():0,i.top-=s&&i.top===t.input.offset().top+r?e(document).scrollTop():0,i.left-=Math.min(i.left,i.left+n>h&&h>n?Math.abs(i.left+n-h):0),i.top-=Math.min(i.top,i.top+a>l&&l>a?Math.abs(a+r):0),i},_findPos:function(t){for(var i,s=this._getInst(t),n=this._get(s,"isRTL");t&&("hidden"===t.type||1!==t.nodeType||e.expr.filters.hidden(t));)t=t[n?"previousSibling":"nextSibling"];return i=e(t).offset(),[i.left,i.top]},_hideDatepicker:function(t){var i,s,n,a,o=this._curInst;!o||t&&o!==e.data(t,"datepicker")||this._datepickerShowing&&(i=this._get(o,"showAnim"),s=this._get(o,"duration"),n=function(){e.datepicker._tidyDialog(o)},e.effects&&(e.effects.effect[i]||e.effects[i])?o.dpDiv.hide(i,e.datepicker._get(o,"showOptions"),s,n):o.dpDiv["slideDown"===i?"slideUp":"fadeIn"===i?"fadeOut":"hide"](i?s:null,n),i||n(),this._datepickerShowing=!1,a=this._get(o,"onClose"),a&&a.apply(o.input?o.input[0]:null,[o.input?o.input.val():"",o]),this._lastInput=null,this._inDialog&&(this._dialogInput.css({position:"absolute",left:"0",top:"-100px"}),e.blockUI&&(e.unblockUI(),e("body").append(this.dpDiv))),this._inDialog=!1)},_tidyDialog:function(e){e.dpDiv.removeClass(this._dialogClass).unbind(".ui-datepicker-calendar")},_checkExternalClick:function(t){if(e.datepicker._curInst){var 
i=e(t.target),s=e.datepicker._getInst(i[0]);(i[0].id!==e.datepicker._mainDivId&&0===i.parents("#"+e.datepicker._mainDivId).length&&!i.hasClass(e.datepicker.markerClassName)&&!i.closest("."+e.datepicker._triggerClass).length&&e.datepicker._datepickerShowing&&(!e.datepicker._inDialog||!e.blockUI)||i.hasClass(e.datepicker.markerClassName)&&e.datepicker._curInst!==s)&&e.datepicker._hideDatepicker()}},_adjustDate:function(t,i,s){var n=e(t),a=this._getInst(n[0]);this._isDisabledDatepicker(n[0])||(this._adjustInstDate(a,i+("M"===s?this._get(a,"showCurrentAtPos"):0),s),this._updateDatepicker(a))},_gotoToday:function(t){var i,s=e(t),n=this._getInst(s[0]);this._get(n,"gotoCurrent")&&n.currentDay?(n.selectedDay=n.currentDay,n.drawMonth=n.selectedMonth=n.currentMonth,n.drawYear=n.selectedYear=n.currentYear):(i=new Date,n.selectedDay=i.getDate(),n.drawMonth=n.selectedMonth=i.getMonth(),n.drawYear=n.selectedYear=i.getFullYear()),this._notifyChange(n),this._adjustDate(s)},_selectMonthYear:function(t,i,s){var n=e(t),a=this._getInst(n[0]);a["selected"+("M"===s?"Month":"Year")]=a["draw"+("M"===s?"Month":"Year")]=parseInt(i.options[i.selectedIndex].value,10),this._notifyChange(a),this._adjustDate(n)},_selectDay:function(t,i,s,n){var a,o=e(t);e(n).hasClass(this._unselectableClass)||this._isDisabledDatepicker(o[0])||(a=this._getInst(o[0]),a.selectedDay=a.currentDay=e("a",n).html(),a.selectedMonth=a.currentMonth=i,a.selectedYear=a.currentYear=s,this._selectDate(t,this._formatDate(a,a.currentDay,a.currentMonth,a.currentYear)))},_clearDate:function(t){var i=e(t);this._selectDate(i,"")},_selectDate:function(t,i){var s,n=e(t),a=this._getInst(n[0]);i=null!=i?i:this._formatDate(a),a.input&&a.input.val(i),this._updateAlternate(a),s=this._get(a,"onSelect"),s?s.apply(a.input?a.input[0]:null,[i,a]):a.input&&a.input.trigger("change"),a.inline?this._updateDatepicker(a):(this._hideDatepicker(),this._lastInput=a.input[0],"object"!=typeof 
a.input[0]&&a.input.focus(),this._lastInput=null)},_updateAlternate:function(t){var i,s,n,a=this._get(t,"altField");a&&(i=this._get(t,"altFormat")||this._get(t,"dateFormat"),s=this._getDate(t),n=this.formatDate(i,s,this._getFormatConfig(t)),e(a).each(function(){e(this).val(n)}))},noWeekends:function(e){var t=e.getDay();return[t>0&&6>t,""]},iso8601Week:function(e){var t,i=new Date(e.getTime());return i.setDate(i.getDate()+4-(i.getDay()||7)),t=i.getTime(),i.setMonth(0),i.setDate(1),Math.floor(Math.round((t-i)/864e5)/7)+1},parseDate:function(t,i,s){if(null==t||null==i)throw"Invalid arguments";if(i="object"==typeof i?""+i:i+"",""===i)return null;var n,a,o,r,h=0,l=(s?s.shortYearCutoff:null)||this._defaults.shortYearCutoff,u="string"!=typeof l?l:(new Date).getFullYear()%100+parseInt(l,10),d=(s?s.dayNamesShort:null)||this._defaults.dayNamesShort,c=(s?s.dayNames:null)||this._defaults.dayNames,p=(s?s.monthNamesShort:null)||this._defaults.monthNamesShort,f=(s?s.monthNames:null)||this._defaults.monthNames,m=-1,g=-1,v=-1,y=-1,b=!1,_=function(e){var i=t.length>n+1&&t.charAt(n+1)===e;return i&&n++,i},x=function(e){var t=_(e),s="@"===e?14:"!"===e?20:"y"===e&&t?4:"o"===e?3:2,n="y"===e?s:1,a=RegExp("^\\d{"+n+","+s+"}"),o=i.substring(h).match(a);if(!o)throw"Missing number at position "+h;return h+=o[0].length,parseInt(o[0],10)},w=function(t,s,n){var a=-1,o=e.map(_(t)?n:s,function(e,t){return[[t,e]]}).sort(function(e,t){return-(e[1].length-t[1].length)});if(e.each(o,function(e,t){var s=t[1];return i.substr(h,s.length).toLowerCase()===s.toLowerCase()?(a=t[0],h+=s.length,!1):void 0}),-1!==a)return a+1;throw"Unknown name at position "+h},k=function(){if(i.charAt(h)!==t.charAt(n))throw"Unexpected literal at position "+h;h++};for(n=0;t.length>n;n++)if(b)"'"!==t.charAt(n)||_("'")?k():b=!1;else switch(t.charAt(n)){case"d":v=x("d");break;case"D":w("D",d,c);break;case"o":y=x("o");break;case"m":g=x("m");break;case"M":g=w("M",p,f);break;case"y":m=x("y");break;case"@":r=new 
Date(x("@")),m=r.getFullYear(),g=r.getMonth()+1,v=r.getDate();break;case"!":r=new Date((x("!")-this._ticksTo1970)/1e4),m=r.getFullYear(),g=r.getMonth()+1,v=r.getDate();break;case"'":_("'")?k():b=!0;break;default:k()}if(i.length>h&&(o=i.substr(h),!/^\s+/.test(o)))throw"Extra/unparsed characters found in date: "+o;if(-1===m?m=(new Date).getFullYear():100>m&&(m+=(new Date).getFullYear()-(new Date).getFullYear()%100+(u>=m?0:-100)),y>-1)for(g=1,v=y;;){if(a=this._getDaysInMonth(m,g-1),a>=v)break;g++,v-=a}if(r=this._daylightSavingAdjust(new Date(m,g-1,v)),r.getFullYear()!==m||r.getMonth()+1!==g||r.getDate()!==v)throw"Invalid date";return r},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:1e7*60*60*24*(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925)),formatDate:function(e,t,i){if(!t)return"";var s,n=(i?i.dayNamesShort:null)||this._defaults.dayNamesShort,a=(i?i.dayNames:null)||this._defaults.dayNames,o=(i?i.monthNamesShort:null)||this._defaults.monthNamesShort,r=(i?i.monthNames:null)||this._defaults.monthNames,h=function(t){var i=e.length>s+1&&e.charAt(s+1)===t;return i&&s++,i},l=function(e,t,i){var s=""+t;if(h(e))for(;i>s.length;)s="0"+s;return s},u=function(e,t,i,s){return h(e)?s[t]:i[t]},d="",c=!1;if(t)for(s=0;e.length>s;s++)if(c)"'"!==e.charAt(s)||h("'")?d+=e.charAt(s):c=!1;else switch(e.charAt(s)){case"d":d+=l("d",t.getDate(),2);break;case"D":d+=u("D",t.getDay(),n,a);break;case"o":d+=l("o",Math.round((new Date(t.getFullYear(),t.getMonth(),t.getDate()).getTime()-new 
Date(t.getFullYear(),0,0).getTime())/864e5),3);break;case"m":d+=l("m",t.getMonth()+1,2);break;case"M":d+=u("M",t.getMonth(),o,r);break;case"y":d+=h("y")?t.getFullYear():(10>t.getYear()%100?"0":"")+t.getYear()%100;break;case"@":d+=t.getTime();break;case"!":d+=1e4*t.getTime()+this._ticksTo1970;break;case"'":h("'")?d+="'":c=!0;break;default:d+=e.charAt(s)}return d},_possibleChars:function(e){var t,i="",s=!1,n=function(i){var s=e.length>t+1&&e.charAt(t+1)===i;return s&&t++,s};for(t=0;e.length>t;t++)if(s)"'"!==e.charAt(t)||n("'")?i+=e.charAt(t):s=!1;else switch(e.charAt(t)){case"d":case"m":case"y":case"@":i+="0123456789";break;case"D":case"M":return null;case"'":n("'")?i+="'":s=!0;break;default:i+=e.charAt(t)}return i},_get:function(e,t){return void 0!==e.settings[t]?e.settings[t]:this._defaults[t]},_setDateFromField:function(e,t){if(e.input.val()!==e.lastVal){var i=this._get(e,"dateFormat"),s=e.lastVal=e.input?e.input.val():null,n=this._getDefaultDate(e),a=n,o=this._getFormatConfig(e);try{a=this.parseDate(i,s,o)||n}catch(r){s=t?"":s}e.selectedDay=a.getDate(),e.drawMonth=e.selectedMonth=a.getMonth(),e.drawYear=e.selectedYear=a.getFullYear(),e.currentDay=s?a.getDate():0,e.currentMonth=s?a.getMonth():0,e.currentYear=s?a.getFullYear():0,this._adjustInstDate(e)}},_getDefaultDate:function(e){return this._restrictMinMax(e,this._determineDate(e,this._get(e,"defaultDate"),new Date))},_determineDate:function(t,i,s){var n=function(e){var t=new Date;return t.setDate(t.getDate()+e),t},a=function(i){try{return e.datepicker.parseDate(e.datepicker._get(t,"dateFormat"),i,e.datepicker._getFormatConfig(t))}catch(s){}for(var n=(i.toLowerCase().match(/^c/)?e.datepicker._getDate(t):null)||new 
Date,a=n.getFullYear(),o=n.getMonth(),r=n.getDate(),h=/([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,l=h.exec(i);l;){switch(l[2]||"d"){case"d":case"D":r+=parseInt(l[1],10);break;case"w":case"W":r+=7*parseInt(l[1],10);break;case"m":case"M":o+=parseInt(l[1],10),r=Math.min(r,e.datepicker._getDaysInMonth(a,o));break;case"y":case"Y":a+=parseInt(l[1],10),r=Math.min(r,e.datepicker._getDaysInMonth(a,o))}l=h.exec(i)}return new Date(a,o,r)},o=null==i||""===i?s:"string"==typeof i?a(i):"number"==typeof i?isNaN(i)?s:n(i):new Date(i.getTime());return o=o&&"Invalid Date"==""+o?s:o,o&&(o.setHours(0),o.setMinutes(0),o.setSeconds(0),o.setMilliseconds(0)),this._daylightSavingAdjust(o)},_daylightSavingAdjust:function(e){return e?(e.setHours(e.getHours()>12?e.getHours()+2:0),e):null},_setDate:function(e,t,i){var s=!t,n=e.selectedMonth,a=e.selectedYear,o=this._restrictMinMax(e,this._determineDate(e,t,new Date));e.selectedDay=e.currentDay=o.getDate(),e.drawMonth=e.selectedMonth=e.currentMonth=o.getMonth(),e.drawYear=e.selectedYear=e.currentYear=o.getFullYear(),n===e.selectedMonth&&a===e.selectedYear||i||this._notifyChange(e),this._adjustInstDate(e),e.input&&e.input.val(s?"":this._formatDate(e))},_getDate:function(e){var t=!e.currentYear||e.input&&""===e.input.val()?null:this._daylightSavingAdjust(new Date(e.currentYear,e.currentMonth,e.currentDay));return t},_attachHandlers:function(t){var i=this._get(t,"stepMonths"),s="#"+t.id.replace(/\\\\/g,"\\");t.dpDiv.find("[data-handler]").map(function(){var t={prev:function(){e.datepicker._adjustDate(s,-i,"M")},next:function(){e.datepicker._adjustDate(s,+i,"M")},hide:function(){e.datepicker._hideDatepicker()},today:function(){e.datepicker._gotoToday(s)},selectDay:function(){return e.datepicker._selectDay(s,+this.getAttribute("data-month"),+this.getAttribute("data-year"),this),!1},selectMonth:function(){return e.datepicker._selectMonthYear(s,this,"M"),!1},selectYear:function(){return 
e.datepicker._selectMonthYear(s,this,"Y"),!1}};e(this).bind(this.getAttribute("data-event"),t[this.getAttribute("data-handler")])})},_generateHTML:function(e){var t,i,s,n,a,o,r,h,l,u,d,c,p,f,m,g,v,y,b,_,x,w,k,T,D,S,M,C,N,A,P,I,H,z,F,E,O,j,W,L=new Date,R=this._daylightSavingAdjust(new Date(L.getFullYear(),L.getMonth(),L.getDate())),Y=this._get(e,"isRTL"),B=this._get(e,"showButtonPanel"),J=this._get(e,"hideIfNoPrevNext"),q=this._get(e,"navigationAsDateFormat"),K=this._getNumberOfMonths(e),V=this._get(e,"showCurrentAtPos"),U=this._get(e,"stepMonths"),Q=1!==K[0]||1!==K[1],G=this._daylightSavingAdjust(e.currentDay?new Date(e.currentYear,e.currentMonth,e.currentDay):new Date(9999,9,9)),X=this._getMinMaxDate(e,"min"),$=this._getMinMaxDate(e,"max"),Z=e.drawMonth-V,et=e.drawYear;if(0>Z&&(Z+=12,et--),$)for(t=this._daylightSavingAdjust(new Date($.getFullYear(),$.getMonth()-K[0]*K[1]+1,$.getDate())),t=X&&X>t?X:t;this._daylightSavingAdjust(new Date(et,Z,1))>t;)Z--,0>Z&&(Z=11,et--);for(e.drawMonth=Z,e.drawYear=et,i=this._get(e,"prevText"),i=q?this.formatDate(i,this._daylightSavingAdjust(new Date(et,Z-U,1)),this._getFormatConfig(e)):i,s=this._canAdjustMonth(e,-1,et,Z)?""+i+"":J?"":""+i+"",n=this._get(e,"nextText"),n=q?this.formatDate(n,this._daylightSavingAdjust(new Date(et,Z+U,1)),this._getFormatConfig(e)):n,a=this._canAdjustMonth(e,1,et,Z)?""+n+"":J?"":""+n+"",o=this._get(e,"currentText"),r=this._get(e,"gotoCurrent")&&e.currentDay?G:R,o=q?this.formatDate(o,r,this._getFormatConfig(e)):o,h=e.inline?"":"",l=B?"
      "+(Y?h:"")+(this._isInRange(e,r)?"":"")+(Y?"":h)+"
      ":"",u=parseInt(this._get(e,"firstDay"),10),u=isNaN(u)?0:u,d=this._get(e,"showWeek"),c=this._get(e,"dayNames"),p=this._get(e,"dayNamesMin"),f=this._get(e,"monthNames"),m=this._get(e,"monthNamesShort"),g=this._get(e,"beforeShowDay"),v=this._get(e,"showOtherMonths"),y=this._get(e,"selectOtherMonths"),b=this._getDefaultDate(e),_="",w=0;K[0]>w;w++){for(k="",this.maxRows=4,T=0;K[1]>T;T++){if(D=this._daylightSavingAdjust(new Date(et,Z,e.selectedDay)),S=" ui-corner-all",M="",Q){if(M+="
      "}for(M+="
      "+(/all|left/.test(S)&&0===w?Y?a:s:"")+(/all|right/.test(S)&&0===w?Y?s:a:"")+this._generateMonthYearHeader(e,Z,et,X,$,w>0||T>0,f,m)+"
      "+"",C=d?"":"",x=0;7>x;x++)N=(x+u)%7,C+="";for(M+=C+"",A=this._getDaysInMonth(et,Z),et===e.selectedYear&&Z===e.selectedMonth&&(e.selectedDay=Math.min(e.selectedDay,A)),P=(this._getFirstDayOfMonth(et,Z)-u+7)%7,I=Math.ceil((P+A)/7),H=Q?this.maxRows>I?this.maxRows:I:I,this.maxRows=H,z=this._daylightSavingAdjust(new Date(et,Z,1-P)),F=0;H>F;F++){for(M+="",E=d?"":"",x=0;7>x;x++)O=g?g.apply(e.input?e.input[0]:null,[z]):[!0,""],j=z.getMonth()!==Z,W=j&&!y||!O[0]||X&&X>z||$&&z>$,E+="",z.setDate(z.getDate()+1),z=this._daylightSavingAdjust(z);M+=E+""}Z++,Z>11&&(Z=0,et++),M+="
      "+this._get(e,"weekHeader")+"=5?" class='ui-datepicker-week-end'":"")+">"+""+p[N]+"
      "+this._get(e,"calculateWeek")(z)+""+(j&&!v?" ":W?""+z.getDate()+"":""+z.getDate()+"")+"
      "+(Q?"
      "+(K[0]>0&&T===K[1]-1?"
      ":""):""),k+=M}_+=k}return _+=l,e._keyEvent=!1,_},_generateMonthYearHeader:function(e,t,i,s,n,a,o,r){var h,l,u,d,c,p,f,m,g=this._get(e,"changeMonth"),v=this._get(e,"changeYear"),y=this._get(e,"showMonthAfterYear"),b="
      ",_="";if(a||!g)_+=""+o[t]+"";else{for(h=s&&s.getFullYear()===i,l=n&&n.getFullYear()===i,_+=""}if(y||(b+=_+(!a&&g&&v?"":" ")),!e.yearshtml)if(e.yearshtml="",a||!v)b+=""+i+"";else{for(d=this._get(e,"yearRange").split(":"),c=(new Date).getFullYear(),p=function(e){var t=e.match(/c[+\-].*/)?i+parseInt(e.substring(1),10):e.match(/[+\-].*/)?c+parseInt(e,10):parseInt(e,10);return isNaN(t)?c:t},f=p(d[0]),m=Math.max(f,p(d[1]||"")),f=s?Math.max(f,s.getFullYear()):f,m=n?Math.min(m,n.getFullYear()):m,e.yearshtml+="",b+=e.yearshtml,e.yearshtml=null}return b+=this._get(e,"yearSuffix"),y&&(b+=(!a&&g&&v?"":" ")+_),b+="
      "},_adjustInstDate:function(e,t,i){var s=e.drawYear+("Y"===i?t:0),n=e.drawMonth+("M"===i?t:0),a=Math.min(e.selectedDay,this._getDaysInMonth(s,n))+("D"===i?t:0),o=this._restrictMinMax(e,this._daylightSavingAdjust(new Date(s,n,a)));e.selectedDay=o.getDate(),e.drawMonth=e.selectedMonth=o.getMonth(),e.drawYear=e.selectedYear=o.getFullYear(),("M"===i||"Y"===i)&&this._notifyChange(e)},_restrictMinMax:function(e,t){var i=this._getMinMaxDate(e,"min"),s=this._getMinMaxDate(e,"max"),n=i&&i>t?i:t;return s&&n>s?s:n},_notifyChange:function(e){var t=this._get(e,"onChangeMonthYear");t&&t.apply(e.input?e.input[0]:null,[e.selectedYear,e.selectedMonth+1,e])},_getNumberOfMonths:function(e){var t=this._get(e,"numberOfMonths");return null==t?[1,1]:"number"==typeof t?[1,t]:t},_getMinMaxDate:function(e,t){return this._determineDate(e,this._get(e,t+"Date"),null)},_getDaysInMonth:function(e,t){return 32-this._daylightSavingAdjust(new Date(e,t,32)).getDate()},_getFirstDayOfMonth:function(e,t){return new Date(e,t,1).getDay()},_canAdjustMonth:function(e,t,i,s){var n=this._getNumberOfMonths(e),a=this._daylightSavingAdjust(new Date(i,s+(0>t?t:n[0]*n[1]),1));return 0>t&&a.setDate(this._getDaysInMonth(a.getFullYear(),a.getMonth())),this._isInRange(e,a)},_isInRange:function(e,t){var i,s,n=this._getMinMaxDate(e,"min"),a=this._getMinMaxDate(e,"max"),o=null,r=null,h=this._get(e,"yearRange");return h&&(i=h.split(":"),s=(new Date).getFullYear(),o=parseInt(i[0],10),r=parseInt(i[1],10),i[0].match(/[+\-].*/)&&(o+=s),i[1].match(/[+\-].*/)&&(r+=s)),(!n||t.getTime()>=n.getTime())&&(!a||t.getTime()<=a.getTime())&&(!o||t.getFullYear()>=o)&&(!r||r>=t.getFullYear())},_getFormatConfig:function(e){var t=this._get(e,"shortYearCutoff");return t="string"!=typeof t?t:(new 
Date).getFullYear()%100+parseInt(t,10),{shortYearCutoff:t,dayNamesShort:this._get(e,"dayNamesShort"),dayNames:this._get(e,"dayNames"),monthNamesShort:this._get(e,"monthNamesShort"),monthNames:this._get(e,"monthNames")}},_formatDate:function(e,t,i,s){t||(e.currentDay=e.selectedDay,e.currentMonth=e.selectedMonth,e.currentYear=e.selectedYear);var n=t?"object"==typeof t?t:this._daylightSavingAdjust(new Date(s,i,t)):this._daylightSavingAdjust(new Date(e.currentYear,e.currentMonth,e.currentDay));return this.formatDate(this._get(e,"dateFormat"),n,this._getFormatConfig(e))}}),e.fn.datepicker=function(t){if(!this.length)return this;e.datepicker.initialized||(e(document).mousedown(e.datepicker._checkExternalClick),e.datepicker.initialized=!0),0===e("#"+e.datepicker._mainDivId).length&&e("body").append(e.datepicker.dpDiv);var i=Array.prototype.slice.call(arguments,1);return"string"!=typeof t||"isDisabled"!==t&&"getDate"!==t&&"widget"!==t?"option"===t&&2===arguments.length&&"string"==typeof arguments[1]?e.datepicker["_"+t+"Datepicker"].apply(e.datepicker,[this[0]].concat(i)):this.each(function(){"string"==typeof t?e.datepicker["_"+t+"Datepicker"].apply(e.datepicker,[this].concat(i)):e.datepicker._attachDatepicker(this,t)}):e.datepicker["_"+t+"Datepicker"].apply(e.datepicker,[this[0]].concat(i))},e.datepicker=new n,e.datepicker.initialized=!1,e.datepicker.uuid=(new 
Date).getTime(),e.datepicker.version="1.11.4",e.datepicker,e.widget("ui.draggable",e.ui.mouse,{version:"1.11.4",widgetEventPrefix:"drag",options:{addClasses:!0,appendTo:"parent",axis:!1,connectToSortable:!1,containment:!1,cursor:"auto",cursorAt:!1,grid:!1,handle:!1,helper:"original",iframeFix:!1,opacity:!1,refreshPositions:!1,revert:!1,revertDuration:500,scope:"default",scroll:!0,scrollSensitivity:20,scrollSpeed:20,snap:!1,snapMode:"both",snapTolerance:20,stack:!1,zIndex:!1,drag:null,start:null,stop:null},_create:function(){"original"===this.options.helper&&this._setPositionRelative(),this.options.addClasses&&this.element.addClass("ui-draggable"),this.options.disabled&&this.element.addClass("ui-draggable-disabled"),this._setHandleClassName(),this._mouseInit()},_setOption:function(e,t){this._super(e,t),"handle"===e&&(this._removeHandleClassName(),this._setHandleClassName())},_destroy:function(){return(this.helper||this.element).is(".ui-draggable-dragging")?(this.destroyOnClear=!0,void 0):(this.element.removeClass("ui-draggable ui-draggable-dragging ui-draggable-disabled"),this._removeHandleClassName(),this._mouseDestroy(),void 0)},_mouseCapture:function(t){var i=this.options;return this._blurActiveElement(t),this.helper||i.disabled||e(t.target).closest(".ui-resizable-handle").length>0?!1:(this.handle=this._getHandle(t),this.handle?(this._blockFrames(i.iframeFix===!0?"iframe":i.iframeFix),!0):!1)},_blockFrames:function(t){this.iframeBlocks=this.document.find(t).map(function(){var t=e(this);return e("
      ").css("position","absolute").appendTo(t.parent()).outerWidth(t.outerWidth()).outerHeight(t.outerHeight()).offset(t.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_blurActiveElement:function(t){var i=this.document[0];if(this.handleElement.is(t.target))try{i.activeElement&&"body"!==i.activeElement.nodeName.toLowerCase()&&e(i.activeElement).blur()}catch(s){}},_mouseStart:function(t){var i=this.options;return this.helper=this._createHelper(t),this.helper.addClass("ui-draggable-dragging"),this._cacheHelperProportions(),e.ui.ddmanager&&(e.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(!0),this.offsetParent=this.helper.offsetParent(),this.hasFixedAncestor=this.helper.parents().filter(function(){return"fixed"===e(this).css("position")}).length>0,this.positionAbs=this.element.offset(),this._refreshOffsets(t),this.originalPosition=this.position=this._generatePosition(t,!1),this.originalPageX=t.pageX,this.originalPageY=t.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this._setContainment(),this._trigger("start",t)===!1?(this._clear(),!1):(this._cacheHelperProportions(),e.ui.ddmanager&&!i.dropBehaviour&&e.ui.ddmanager.prepareOffsets(this,t),this._normalizeRightBottom(),this._mouseDrag(t,!0),e.ui.ddmanager&&e.ui.ddmanager.dragStart(this,t),!0)},_refreshOffsets:function(e){this.offset={top:this.positionAbs.top-this.margins.top,left:this.positionAbs.left-this.margins.left,scroll:!1,parent:this._getParentOffset(),relative:this._getRelativeOffset()},this.offset.click={left:e.pageX-this.offset.left,top:e.pageY-this.offset.top}},_mouseDrag:function(t,i){if(this.hasFixedAncestor&&(this.offset.parent=this._getParentOffset()),this.position=this._generatePosition(t,!0),this.positionAbs=this._convertPositionTo("absolute"),!i){var s=this._uiHash();if(this._trigger("drag",t,s)===!1)return 
this._mouseUp({}),!1;this.position=s.position}return this.helper[0].style.left=this.position.left+"px",this.helper[0].style.top=this.position.top+"px",e.ui.ddmanager&&e.ui.ddmanager.drag(this,t),!1},_mouseStop:function(t){var i=this,s=!1;return e.ui.ddmanager&&!this.options.dropBehaviour&&(s=e.ui.ddmanager.drop(this,t)),this.dropped&&(s=this.dropped,this.dropped=!1),"invalid"===this.options.revert&&!s||"valid"===this.options.revert&&s||this.options.revert===!0||e.isFunction(this.options.revert)&&this.options.revert.call(this.element,s)?e(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){i._trigger("stop",t)!==!1&&i._clear()}):this._trigger("stop",t)!==!1&&this._clear(),!1},_mouseUp:function(t){return this._unblockFrames(),e.ui.ddmanager&&e.ui.ddmanager.dragStop(this,t),this.handleElement.is(t.target)&&this.element.focus(),e.ui.mouse.prototype._mouseUp.call(this,t)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp({}):this._clear(),this},_getHandle:function(t){return this.options.handle?!!e(t.target).closest(this.element.find(this.options.handle)).length:!0},_setHandleClassName:function(){this.handleElement=this.options.handle?this.element.find(this.options.handle):this.element,this.handleElement.addClass("ui-draggable-handle")},_removeHandleClassName:function(){this.handleElement.removeClass("ui-draggable-handle")},_createHelper:function(t){var i=this.options,s=e.isFunction(i.helper),n=s?e(i.helper.apply(this.element[0],[t])):"clone"===i.helper?this.element.clone().removeAttr("id"):this.element;return 
n.parents("body").length||n.appendTo("parent"===i.appendTo?this.element[0].parentNode:i.appendTo),s&&n[0]===this.element[0]&&this._setPositionRelative(),n[0]===this.element[0]||/(fixed|absolute)/.test(n.css("position"))||n.css("position","absolute"),n},_setPositionRelative:function(){/^(?:r|a|f)/.test(this.element.css("position"))||(this.element[0].style.position="relative")},_adjustOffsetFromHelper:function(t){"string"==typeof t&&(t=t.split(" ")),e.isArray(t)&&(t={left:+t[0],top:+t[1]||0}),"left"in t&&(this.offset.click.left=t.left+this.margins.left),"right"in t&&(this.offset.click.left=this.helperProportions.width-t.right+this.margins.left),"top"in t&&(this.offset.click.top=t.top+this.margins.top),"bottom"in t&&(this.offset.click.top=this.helperProportions.height-t.bottom+this.margins.top)},_isRootNode:function(e){return/(html|body)/i.test(e.tagName)||e===this.document[0]},_getParentOffset:function(){var t=this.offsetParent.offset(),i=this.document[0];return"absolute"===this.cssPosition&&this.scrollParent[0]!==i&&e.contains(this.scrollParent[0],this.offsetParent[0])&&(t.left+=this.scrollParent.scrollLeft(),t.top+=this.scrollParent.scrollTop()),this._isRootNode(this.offsetParent[0])&&(t={top:0,left:0}),{top:t.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:t.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"!==this.cssPosition)return{top:0,left:0};var 
e=this.element.position(),t=this._isRootNode(this.scrollParent[0]);return{top:e.top-(parseInt(this.helper.css("top"),10)||0)+(t?0:this.scrollParent.scrollTop()),left:e.left-(parseInt(this.helper.css("left"),10)||0)+(t?0:this.scrollParent.scrollLeft())}},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var t,i,s,n=this.options,a=this.document[0];return this.relativeContainer=null,n.containment?"window"===n.containment?(this.containment=[e(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,e(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,e(window).scrollLeft()+e(window).width()-this.helperProportions.width-this.margins.left,e(window).scrollTop()+(e(window).height()||a.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):"document"===n.containment?(this.containment=[0,0,e(a).width()-this.helperProportions.width-this.margins.left,(e(a).height()||a.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):n.containment.constructor===Array?(this.containment=n.containment,void 
0):("parent"===n.containment&&(n.containment=this.helper[0].parentNode),i=e(n.containment),s=i[0],s&&(t=/(scroll|auto)/.test(i.css("overflow")),this.containment=[(parseInt(i.css("borderLeftWidth"),10)||0)+(parseInt(i.css("paddingLeft"),10)||0),(parseInt(i.css("borderTopWidth"),10)||0)+(parseInt(i.css("paddingTop"),10)||0),(t?Math.max(s.scrollWidth,s.offsetWidth):s.offsetWidth)-(parseInt(i.css("borderRightWidth"),10)||0)-(parseInt(i.css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(t?Math.max(s.scrollHeight,s.offsetHeight):s.offsetHeight)-(parseInt(i.css("borderBottomWidth"),10)||0)-(parseInt(i.css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relativeContainer=i),void 0):(this.containment=null,void 0) +},_convertPositionTo:function(e,t){t||(t=this.position);var i="absolute"===e?1:-1,s=this._isRootNode(this.scrollParent[0]);return{top:t.top+this.offset.relative.top*i+this.offset.parent.top*i-("fixed"===this.cssPosition?-this.offset.scroll.top:s?0:this.offset.scroll.top)*i,left:t.left+this.offset.relative.left*i+this.offset.parent.left*i-("fixed"===this.cssPosition?-this.offset.scroll.left:s?0:this.offset.scroll.left)*i}},_generatePosition:function(e,t){var i,s,n,a,o=this.options,r=this._isRootNode(this.scrollParent[0]),h=e.pageX,l=e.pageY;return 
r&&this.offset.scroll||(this.offset.scroll={top:this.scrollParent.scrollTop(),left:this.scrollParent.scrollLeft()}),t&&(this.containment&&(this.relativeContainer?(s=this.relativeContainer.offset(),i=[this.containment[0]+s.left,this.containment[1]+s.top,this.containment[2]+s.left,this.containment[3]+s.top]):i=this.containment,e.pageX-this.offset.click.lefti[2]&&(h=i[2]+this.offset.click.left),e.pageY-this.offset.click.top>i[3]&&(l=i[3]+this.offset.click.top)),o.grid&&(n=o.grid[1]?this.originalPageY+Math.round((l-this.originalPageY)/o.grid[1])*o.grid[1]:this.originalPageY,l=i?n-this.offset.click.top>=i[1]||n-this.offset.click.top>i[3]?n:n-this.offset.click.top>=i[1]?n-o.grid[1]:n+o.grid[1]:n,a=o.grid[0]?this.originalPageX+Math.round((h-this.originalPageX)/o.grid[0])*o.grid[0]:this.originalPageX,h=i?a-this.offset.click.left>=i[0]||a-this.offset.click.left>i[2]?a:a-this.offset.click.left>=i[0]?a-o.grid[0]:a+o.grid[0]:a),"y"===o.axis&&(h=this.originalPageX),"x"===o.axis&&(l=this.originalPageY)),{top:l-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.offset.scroll.top:r?0:this.offset.scroll.top),left:h-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.offset.scroll.left:r?0:this.offset.scroll.left)}},_clear:function(){this.helper.removeClass("ui-draggable-dragging"),this.helper[0]===this.element[0]||this.cancelHelperRemoval||this.helper.remove(),this.helper=null,this.cancelHelperRemoval=!1,this.destroyOnClear&&this.destroy()},_normalizeRightBottom:function(){"y"!==this.options.axis&&"auto"!==this.helper.css("right")&&(this.helper.width(this.helper.width()),this.helper.css("right","auto")),"x"!==this.options.axis&&"auto"!==this.helper.css("bottom")&&(this.helper.height(this.helper.height()),this.helper.css("bottom","auto"))},_trigger:function(t,i,s){return 
s=s||this._uiHash(),e.ui.plugin.call(this,t,[i,s,this],!0),/^(drag|start|stop)/.test(t)&&(this.positionAbs=this._convertPositionTo("absolute"),s.offset=this.positionAbs),e.Widget.prototype._trigger.call(this,t,i,s)},plugins:{},_uiHash:function(){return{helper:this.helper,position:this.position,originalPosition:this.originalPosition,offset:this.positionAbs}}}),e.ui.plugin.add("draggable","connectToSortable",{start:function(t,i,s){var n=e.extend({},i,{item:s.element});s.sortables=[],e(s.options.connectToSortable).each(function(){var i=e(this).sortable("instance");i&&!i.options.disabled&&(s.sortables.push(i),i.refreshPositions(),i._trigger("activate",t,n))})},stop:function(t,i,s){var n=e.extend({},i,{item:s.element});s.cancelHelperRemoval=!1,e.each(s.sortables,function(){var e=this;e.isOver?(e.isOver=0,s.cancelHelperRemoval=!0,e.cancelHelperRemoval=!1,e._storedCSS={position:e.placeholder.css("position"),top:e.placeholder.css("top"),left:e.placeholder.css("left")},e._mouseStop(t),e.options.helper=e.options._helper):(e.cancelHelperRemoval=!0,e._trigger("deactivate",t,n))})},drag:function(t,i,s){e.each(s.sortables,function(){var n=!1,a=this;a.positionAbs=s.positionAbs,a.helperProportions=s.helperProportions,a.offset.click=s.offset.click,a._intersectsWith(a.containerCache)&&(n=!0,e.each(s.sortables,function(){return this.positionAbs=s.positionAbs,this.helperProportions=s.helperProportions,this.offset.click=s.offset.click,this!==a&&this._intersectsWith(this.containerCache)&&e.contains(a.element[0],this.element[0])&&(n=!1),n})),n?(a.isOver||(a.isOver=1,s._parent=i.helper.parent(),a.currentItem=i.helper.appendTo(a.element).data("ui-sortable-item",!0),a.options._helper=a.options.helper,a.options.helper=function(){return 
i.helper[0]},t.target=a.currentItem[0],a._mouseCapture(t,!0),a._mouseStart(t,!0,!0),a.offset.click.top=s.offset.click.top,a.offset.click.left=s.offset.click.left,a.offset.parent.left-=s.offset.parent.left-a.offset.parent.left,a.offset.parent.top-=s.offset.parent.top-a.offset.parent.top,s._trigger("toSortable",t),s.dropped=a.element,e.each(s.sortables,function(){this.refreshPositions()}),s.currentItem=s.element,a.fromOutside=s),a.currentItem&&(a._mouseDrag(t),i.position=a.position)):a.isOver&&(a.isOver=0,a.cancelHelperRemoval=!0,a.options._revert=a.options.revert,a.options.revert=!1,a._trigger("out",t,a._uiHash(a)),a._mouseStop(t,!0),a.options.revert=a.options._revert,a.options.helper=a.options._helper,a.placeholder&&a.placeholder.remove(),i.helper.appendTo(s._parent),s._refreshOffsets(t),i.position=s._generatePosition(t,!0),s._trigger("fromSortable",t),s.dropped=!1,e.each(s.sortables,function(){this.refreshPositions()}))})}}),e.ui.plugin.add("draggable","cursor",{start:function(t,i,s){var n=e("body"),a=s.options;n.css("cursor")&&(a._cursor=n.css("cursor")),n.css("cursor",a.cursor)},stop:function(t,i,s){var n=s.options;n._cursor&&e("body").css("cursor",n._cursor)}}),e.ui.plugin.add("draggable","opacity",{start:function(t,i,s){var n=e(i.helper),a=s.options;n.css("opacity")&&(a._opacity=n.css("opacity")),n.css("opacity",a.opacity)},stop:function(t,i,s){var n=s.options;n._opacity&&e(i.helper).css("opacity",n._opacity)}}),e.ui.plugin.add("draggable","scroll",{start:function(e,t,i){i.scrollParentNotHidden||(i.scrollParentNotHidden=i.helper.scrollParent(!1)),i.scrollParentNotHidden[0]!==i.document[0]&&"HTML"!==i.scrollParentNotHidden[0].tagName&&(i.overflowOffset=i.scrollParentNotHidden.offset())},drag:function(t,i,s){var 
n=s.options,a=!1,o=s.scrollParentNotHidden[0],r=s.document[0];o!==r&&"HTML"!==o.tagName?(n.axis&&"x"===n.axis||(s.overflowOffset.top+o.offsetHeight-t.pageY=0;c--)h=s.snapElements[c].left-s.margins.left,l=h+s.snapElements[c].width,u=s.snapElements[c].top-s.margins.top,d=u+s.snapElements[c].height,h-m>v||g>l+m||u-m>b||y>d+m||!e.contains(s.snapElements[c].item.ownerDocument,s.snapElements[c].item)?(s.snapElements[c].snapping&&s.options.snap.release&&s.options.snap.release.call(s.element,t,e.extend(s._uiHash(),{snapItem:s.snapElements[c].item})),s.snapElements[c].snapping=!1):("inner"!==f.snapMode&&(n=m>=Math.abs(u-b),a=m>=Math.abs(d-y),o=m>=Math.abs(h-v),r=m>=Math.abs(l-g),n&&(i.position.top=s._convertPositionTo("relative",{top:u-s.helperProportions.height,left:0}).top),a&&(i.position.top=s._convertPositionTo("relative",{top:d,left:0}).top),o&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h-s.helperProportions.width}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l}).left)),p=n||a||o||r,"outer"!==f.snapMode&&(n=m>=Math.abs(u-y),a=m>=Math.abs(d-b),o=m>=Math.abs(h-g),r=m>=Math.abs(l-v),n&&(i.position.top=s._convertPositionTo("relative",{top:u,left:0}).top),a&&(i.position.top=s._convertPositionTo("relative",{top:d-s.helperProportions.height,left:0}).top),o&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l-s.helperProportions.width}).left)),!s.snapElements[c].snapping&&(n||a||o||r||p)&&s.options.snap.snap&&s.options.snap.snap.call(s.element,t,e.extend(s._uiHash(),{snapItem:s.snapElements[c].item})),s.snapElements[c].snapping=n||a||o||r||p)}}),e.ui.plugin.add("draggable","stack",{start:function(t,i,s){var 
n,a=s.options,o=e.makeArray(e(a.stack)).sort(function(t,i){return(parseInt(e(t).css("zIndex"),10)||0)-(parseInt(e(i).css("zIndex"),10)||0)});o.length&&(n=parseInt(e(o[0]).css("zIndex"),10)||0,e(o).each(function(t){e(this).css("zIndex",n+t)}),this.css("zIndex",n+o.length))}}),e.ui.plugin.add("draggable","zIndex",{start:function(t,i,s){var n=e(i.helper),a=s.options;n.css("zIndex")&&(a._zIndex=n.css("zIndex")),n.css("zIndex",a.zIndex)},stop:function(t,i,s){var n=s.options;n._zIndex&&e(i.helper).css("zIndex",n._zIndex)}}),e.ui.draggable,e.widget("ui.resizable",e.ui.mouse,{version:"1.11.4",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(e){return parseInt(e,10)||0},_isNumber:function(e){return!isNaN(parseInt(e,10))},_hasScroll:function(t,i){if("hidden"===e(t).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return t[s]>0?!0:(t[s]=1,n=t[s]>0,t[s]=0,n)},_create:function(){var t,i,s,n,a,o=this,r=this.options;if(this.element.addClass("ui-resizable"),e.extend(this,{_aspectRatio:!!r.aspectRatio,aspectRatio:r.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:r.helper||r.ghost||r.animate?r.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(e("
      ").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,this.element.css({marginLeft:this.originalElement.css("marginLeft"),marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom")}),this.originalElement.css({marginLeft:0,marginTop:0,marginRight:0,marginBottom:0}),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css({margin:this.originalElement.css("margin")}),this._proportionallyResize()),this.handles=r.handles||(e(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=e(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),t=this.handles.split(","),this.handles={},i=0;t.length>i;i++)s=e.trim(t[i]),a="ui-resizable-"+s,n=e("
      "),n.css({zIndex:r.zIndex}),"se"===s&&n.addClass("ui-icon ui-icon-gripsmall-diagonal-se"),this.handles[s]=".ui-resizable-"+s,this.element.append(n);this._renderAxis=function(t){var i,s,n,a;t=t||this.element;for(i in this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=e(this.handles[i]),this._on(this.handles[i],{mousedown:o._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=e(this.handles[i],this.element),a=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),t.css(n,a),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.mouseover(function(){o.resizing||(this.className&&(n=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),o.axis=n&&n[1]?n[1]:"se")}),r.autoHide&&(this._handles.hide(),e(this.element).addClass("ui-resizable-autohide").mouseenter(function(){r.disabled||(e(this).removeClass("ui-resizable-autohide"),o._handles.show())}).mouseleave(function(){r.disabled||o.resizing||(e(this).addClass("ui-resizable-autohide"),o._handles.hide())})),this._mouseInit()},_destroy:function(){this._mouseDestroy();var t,i=function(t){e(t).removeClass("ui-resizable ui-resizable-disabled ui-resizable-resizing").removeData("resizable").removeData("ui-resizable").unbind(".resizable").find(".ui-resizable-handle").remove()};return 
this.elementIsWrapper&&(i(this.element),t=this.element,this.originalElement.css({position:t.css("position"),width:t.outerWidth(),height:t.outerHeight(),top:t.css("top"),left:t.css("left")}).insertAfter(t),t.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_mouseCapture:function(t){var i,s,n=!1;for(i in this.handles)s=e(this.handles[i])[0],(s===t.target||e.contains(s,t.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(t){var i,s,n,a=this.options,o=this.element;return this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),a.containment&&(i+=e(a.containment).scrollLeft()||0,s+=e(a.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:o.width(),height:o.height()},this.originalSize=this._helper?{width:o.outerWidth(),height:o.outerHeight()}:{width:o.width(),height:o.height()},this.sizeDiff={width:o.outerWidth()-o.width(),height:o.outerHeight()-o.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:t.pageX,top:t.pageY},this.aspectRatio="number"==typeof a.aspectRatio?a.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=e(".ui-resizable-"+this.axis).css("cursor"),e("body").css("cursor","auto"===n?this.axis+"-resize":n),o.addClass("ui-resizable-resizing"),this._propagate("start",t),!0},_mouseDrag:function(t){var i,s,n=this.originalMousePosition,a=this.axis,o=t.pageX-n.left||0,r=t.pageY-n.top||0,h=this._change[a];return 
this._updatePrevProperties(),h?(i=h.apply(this,[t,o,r]),this._updateVirtualBoundaries(t.shiftKey),(this._aspectRatio||t.shiftKey)&&(i=this._updateRatio(i,t)),i=this._respectSize(i,t),this._updateCache(i),this._propagate("resize",t),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),e.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",t,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(t){this.resizing=!1;var i,s,n,a,o,r,h,l=this.options,u=this;return this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:u.sizeDiff.height,a=s?0:u.sizeDiff.width,o={width:u.helper.width()-a,height:u.helper.height()-n},r=parseInt(u.element.css("left"),10)+(u.position.left-u.originalPosition.left)||null,h=parseInt(u.element.css("top"),10)+(u.position.top-u.originalPosition.top)||null,l.animate||this.element.css(e.extend(o,{top:h,left:r})),u.helper.height(u.size.height),u.helper.width(u.size.width),this._helper&&!l.animate&&this._proportionallyResize()),e("body").css("cursor","auto"),this.element.removeClass("ui-resizable-resizing"),this._propagate("stop",t),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var e={};return this.position.top!==this.prevPosition.top&&(e.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(e.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(e.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(e.height=this.size.height+"px"),this.helper.css(e),e},_updateVirtualBoundaries:function(e){var 
t,i,s,n,a,o=this.options;a={minWidth:this._isNumber(o.minWidth)?o.minWidth:0,maxWidth:this._isNumber(o.maxWidth)?o.maxWidth:1/0,minHeight:this._isNumber(o.minHeight)?o.minHeight:0,maxHeight:this._isNumber(o.maxHeight)?o.maxHeight:1/0},(this._aspectRatio||e)&&(t=a.minHeight*this.aspectRatio,s=a.minWidth/this.aspectRatio,i=a.maxHeight*this.aspectRatio,n=a.maxWidth/this.aspectRatio,t>a.minWidth&&(a.minWidth=t),s>a.minHeight&&(a.minHeight=s),a.maxWidth>i&&(a.maxWidth=i),a.maxHeight>n&&(a.maxHeight=n)),this._vBoundaries=a},_updateCache:function(e){this.offset=this.helper.offset(),this._isNumber(e.left)&&(this.position.left=e.left),this._isNumber(e.top)&&(this.position.top=e.top),this._isNumber(e.height)&&(this.size.height=e.height),this._isNumber(e.width)&&(this.size.width=e.width)},_updateRatio:function(e){var t=this.position,i=this.size,s=this.axis;return this._isNumber(e.height)?e.width=e.height*this.aspectRatio:this._isNumber(e.width)&&(e.height=e.width/this.aspectRatio),"sw"===s&&(e.left=t.left+(i.width-e.width),e.top=null),"nw"===s&&(e.top=t.top+(i.height-e.height),e.left=t.left+(i.width-e.width)),e},_respectSize:function(e){var t=this._vBoundaries,i=this.axis,s=this._isNumber(e.width)&&t.maxWidth&&t.maxWidthe.width,o=this._isNumber(e.height)&&t.minHeight&&t.minHeight>e.height,r=this.originalPosition.left+this.originalSize.width,h=this.position.top+this.size.height,l=/sw|nw|w/.test(i),u=/nw|ne|n/.test(i);return a&&(e.width=t.minWidth),o&&(e.height=t.minHeight),s&&(e.width=t.maxWidth),n&&(e.height=t.maxHeight),a&&l&&(e.left=r-t.minWidth),s&&l&&(e.left=r-t.maxWidth),o&&u&&(e.top=h-t.minHeight),n&&u&&(e.top=h-t.maxHeight),e.width||e.height||e.left||!e.top?e.width||e.height||e.top||!e.left||(e.left=null):e.top=null,e},_getPaddingPlusBorderDimensions:function(e){for(var 
t=0,i=[],s=[e.css("borderTopWidth"),e.css("borderRightWidth"),e.css("borderBottomWidth"),e.css("borderLeftWidth")],n=[e.css("paddingTop"),e.css("paddingRight"),e.css("paddingBottom"),e.css("paddingLeft")];4>t;t++)i[t]=parseInt(s[t],10)||0,i[t]+=parseInt(n[t],10)||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var e,t=0,i=this.helper||this.element;this._proportionallyResizeElements.length>t;t++)e=this._proportionallyResizeElements[t],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(e)),e.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var t=this.element,i=this.options;this.elementOffset=t.offset(),this._helper?(this.helper=this.helper||e("
      "),this.helper.addClass(this._helper).css({width:this.element.outerWidth()-1,height:this.element.outerHeight()-1,position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(e,t){return{width:this.originalSize.width+t}},w:function(e,t){var i=this.originalSize,s=this.originalPosition;return{left:s.left+t,width:i.width-t}},n:function(e,t,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(e,t,i){return{height:this.originalSize.height+i}},se:function(t,i,s){return e.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[t,i,s]))},sw:function(t,i,s){return e.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[t,i,s]))},ne:function(t,i,s){return e.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[t,i,s]))},nw:function(t,i,s){return e.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[t,i,s]))}},_propagate:function(t,i){e.ui.plugin.call(this,t,[i,this.ui()]),"resize"!==t&&this._trigger(t,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),e.ui.plugin.add("resizable","animate",{stop:function(t){var i=e(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,a=n.length&&/textarea/i.test(n[0].nodeName),o=a&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=a?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-o},l=parseInt(i.element.css("left"),10)+(i.position.left-i.originalPosition.left)||null,u=parseInt(i.element.css("top"),10)+(i.position.top-i.originalPosition.top)||null;i.element.animate(e.extend(h,u&&l?{top:u,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var 
s={width:parseInt(i.element.css("width"),10),height:parseInt(i.element.css("height"),10),top:parseInt(i.element.css("top"),10),left:parseInt(i.element.css("left"),10)};n&&n.length&&e(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",t)}})}}),e.ui.plugin.add("resizable","containment",{start:function(){var t,i,s,n,a,o,r,h=e(this).resizable("instance"),l=h.options,u=h.element,d=l.containment,c=d instanceof e?d.get(0):/parent/.test(d)?u.parent().get(0):d;c&&(h.containerElement=e(c),/document/.test(d)||d===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:e(document),left:0,top:0,width:e(document).width(),height:e(document).height()||document.body.parentNode.scrollHeight}):(t=e(c),i=[],e(["Top","Right","Left","Bottom"]).each(function(e,s){i[e]=h._num(t.css("padding"+s))}),h.containerOffset=t.offset(),h.containerPosition=t.position(),h.containerSize={height:t.innerHeight()-i[3],width:t.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,a=h.containerSize.width,o=h._hasScroll(c,"left")?c.scrollWidth:a,r=h._hasScroll(c)?c.scrollHeight:n,h.parentData={element:c,left:s.left,top:s.top,width:o,height:r}))},resize:function(t){var 
i,s,n,a,o=e(this).resizable("instance"),r=o.options,h=o.containerOffset,l=o.position,u=o._aspectRatio||t.shiftKey,d={top:0,left:0},c=o.containerElement,p=!0;c[0]!==document&&/static/.test(c.css("position"))&&(d=h),l.left<(o._helper?h.left:0)&&(o.size.width=o.size.width+(o._helper?o.position.left-h.left:o.position.left-d.left),u&&(o.size.height=o.size.width/o.aspectRatio,p=!1),o.position.left=r.helper?h.left:0),l.top<(o._helper?h.top:0)&&(o.size.height=o.size.height+(o._helper?o.position.top-h.top:o.position.top),u&&(o.size.width=o.size.height*o.aspectRatio,p=!1),o.position.top=o._helper?h.top:0),n=o.containerElement.get(0)===o.element.parent().get(0),a=/relative|absolute/.test(o.containerElement.css("position")),n&&a?(o.offset.left=o.parentData.left+o.position.left,o.offset.top=o.parentData.top+o.position.top):(o.offset.left=o.element.offset().left,o.offset.top=o.element.offset().top),i=Math.abs(o.sizeDiff.width+(o._helper?o.offset.left-d.left:o.offset.left-h.left)),s=Math.abs(o.sizeDiff.height+(o._helper?o.offset.top-d.top:o.offset.top-h.top)),i+o.size.width>=o.parentData.width&&(o.size.width=o.parentData.width-i,u&&(o.size.height=o.size.width/o.aspectRatio,p=!1)),s+o.size.height>=o.parentData.height&&(o.size.height=o.parentData.height-s,u&&(o.size.width=o.size.height*o.aspectRatio,p=!1)),p||(o.position.left=o.prevPosition.left,o.position.top=o.prevPosition.top,o.size.width=o.prevSize.width,o.size.height=o.prevSize.height)},stop:function(){var t=e(this).resizable("instance"),i=t.options,s=t.containerOffset,n=t.containerPosition,a=t.containerElement,o=e(t.helper),r=o.offset(),h=o.outerWidth()-t.sizeDiff.width,l=o.outerHeight()-t.sizeDiff.height;t._helper&&!i.animate&&/relative/.test(a.css("position"))&&e(this).css({left:r.left-n.left-s.left,width:h,height:l}),t._helper&&!i.animate&&/static/.test(a.css("position"))&&e(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),e.ui.plugin.add("resizable","alsoResize",{start:function(){var 
t=e(this).resizable("instance"),i=t.options;e(i.alsoResize).each(function(){var t=e(this);t.data("ui-resizable-alsoresize",{width:parseInt(t.width(),10),height:parseInt(t.height(),10),left:parseInt(t.css("left"),10),top:parseInt(t.css("top"),10)})})},resize:function(t,i){var s=e(this).resizable("instance"),n=s.options,a=s.originalSize,o=s.originalPosition,r={height:s.size.height-a.height||0,width:s.size.width-a.width||0,top:s.position.top-o.top||0,left:s.position.left-o.left||0};e(n.alsoResize).each(function(){var t=e(this),s=e(this).data("ui-resizable-alsoresize"),n={},a=t.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];e.each(a,function(e,t){var i=(s[t]||0)+(r[t]||0);i&&i>=0&&(n[t]=i||null)}),t.css(n)})},stop:function(){e(this).removeData("resizable-alsoresize")}}),e.ui.plugin.add("resizable","ghost",{start:function(){var t=e(this).resizable("instance"),i=t.options,s=t.size;t.ghost=t.originalElement.clone(),t.ghost.css({opacity:.25,display:"block",position:"relative",height:s.height,width:s.width,margin:0,left:0,top:0}).addClass("ui-resizable-ghost").addClass("string"==typeof i.ghost?i.ghost:""),t.ghost.appendTo(t.helper)},resize:function(){var t=e(this).resizable("instance");t.ghost&&t.ghost.css({position:"relative",height:t.size.height,width:t.size.width})},stop:function(){var t=e(this).resizable("instance");t.ghost&&t.helper&&t.helper.get(0).removeChild(t.ghost.get(0))}}),e.ui.plugin.add("resizable","grid",{resize:function(){var t,i=e(this).resizable("instance"),s=i.options,n=i.size,a=i.originalSize,o=i.originalPosition,r=i.axis,h="number"==typeof 
s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,u=h[1]||1,d=Math.round((n.width-a.width)/l)*l,c=Math.round((n.height-a.height)/u)*u,p=a.width+d,f=a.height+c,m=s.maxWidth&&p>s.maxWidth,g=s.maxHeight&&f>s.maxHeight,v=s.minWidth&&s.minWidth>p,y=s.minHeight&&s.minHeight>f;s.grid=h,v&&(p+=l),y&&(f+=u),m&&(p-=l),g&&(f-=u),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=o.top-c):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=o.left-d):((0>=f-u||0>=p-l)&&(t=i._getPaddingPlusBorderDimensions(this)),f-u>0?(i.size.height=f,i.position.top=o.top-c):(f=u-t.height,i.size.height=f,i.position.top=o.top+a.height-f),p-l>0?(i.size.width=p,i.position.left=o.left-d):(p=l-t.width,i.size.width=p,i.position.left=o.left+a.width-p))}}),e.ui.resizable,e.widget("ui.dialog",{version:"1.11.4",options:{appendTo:"body",autoOpen:!0,buttons:[],closeOnEscape:!0,closeText:"Close",dialogClass:"",draggable:!0,hide:null,height:"auto",maxHeight:null,maxWidth:null,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(t){var 
i=e(this).css(t).offset().top;0>i&&e(this).css("top",t.top-i)}},resizable:!0,show:null,title:null,width:300,beforeClose:null,close:null,drag:null,dragStart:null,dragStop:null,focus:null,open:null,resize:null,resizeStart:null,resizeStop:null},sizeRelatedOptions:{buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},resizableRelatedOptions:{maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0},_create:function(){this.originalCss={display:this.element[0].style.display,width:this.element[0].style.width,minHeight:this.element[0].style.minHeight,maxHeight:this.element[0].style.maxHeight,height:this.element[0].style.height},this.originalPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.originalTitle=this.element.attr("title"),this.options.title=this.options.title||this.originalTitle,this._createWrapper(),this.element.show().removeAttr("title").addClass("ui-dialog-content ui-widget-content").appendTo(this.uiDialog),this._createTitlebar(),this._createButtonPane(),this.options.draggable&&e.fn.draggable&&this._makeDraggable(),this.options.resizable&&e.fn.resizable&&this._makeResizable(),this._isOpen=!1,this._trackFocus()},_init:function(){this.options.autoOpen&&this.open()},_appendTo:function(){var t=this.options.appendTo;return t&&(t.jquery||t.nodeType)?e(t):this.document.find(t||"body").eq(0)},_destroy:function(){var e,t=this.originalPosition;this._untrackInstance(),this._destroyOverlay(),this.element.removeUniqueId().removeClass("ui-dialog-content ui-widget-content").css(this.originalCss).detach(),this.uiDialog.stop(!0,!0).remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),e=t.parent.children().eq(t.index),e.length&&e[0]!==this.element[0]?e.before(this.element):t.parent.append(this.element)},widget:function(){return this.uiDialog},disable:e.noop,enable:e.noop,close:function(t){var 
i,s=this;if(this._isOpen&&this._trigger("beforeClose",t)!==!1){if(this._isOpen=!1,this._focusedElement=null,this._destroyOverlay(),this._untrackInstance(),!this.opener.filter(":focusable").focus().length)try{i=this.document[0].activeElement,i&&"body"!==i.nodeName.toLowerCase()&&e(i).blur()}catch(n){}this._hide(this.uiDialog,this.options.hide,function(){s._trigger("close",t)})}},isOpen:function(){return this._isOpen},moveToTop:function(){this._moveToTop()},_moveToTop:function(t,i){var s=!1,n=this.uiDialog.siblings(".ui-front:visible").map(function(){return+e(this).css("z-index")}).get(),a=Math.max.apply(null,n);return a>=+this.uiDialog.css("z-index")&&(this.uiDialog.css("z-index",a+1),s=!0),s&&!i&&this._trigger("focus",t),s},open:function(){var t=this;return this._isOpen?(this._moveToTop()&&this._focusTabbable(),void 0):(this._isOpen=!0,this.opener=e(this.document[0].activeElement),this._size(),this._position(),this._createOverlay(),this._moveToTop(null,!0),this.overlay&&this.overlay.css("z-index",this.uiDialog.css("z-index")-1),this._show(this.uiDialog,this.options.show,function(){t._focusTabbable(),t._trigger("focus")}),this._makeFocusTarget(),this._trigger("open"),void 0)},_focusTabbable:function(){var e=this._focusedElement;e||(e=this.element.find("[autofocus]")),e.length||(e=this.element.find(":tabbable")),e.length||(e=this.uiDialogButtonPane.find(":tabbable")),e.length||(e=this.uiDialogTitlebarClose.filter(":tabbable")),e.length||(e=this.uiDialog),e.eq(0).focus()},_keepFocus:function(t){function i(){var t=this.document[0].activeElement,i=this.uiDialog[0]===t||e.contains(this.uiDialog[0],t);i||this._focusTabbable()}t.preventDefault(),i.call(this),this._delay(i)},_createWrapper:function(){this.uiDialog=e("
      ").addClass("ui-dialog ui-widget ui-widget-content ui-corner-all ui-front "+this.options.dialogClass).hide().attr({tabIndex:-1,role:"dialog"}).appendTo(this._appendTo()),this._on(this.uiDialog,{keydown:function(t){if(this.options.closeOnEscape&&!t.isDefaultPrevented()&&t.keyCode&&t.keyCode===e.ui.keyCode.ESCAPE)return t.preventDefault(),this.close(t),void 0; +if(t.keyCode===e.ui.keyCode.TAB&&!t.isDefaultPrevented()){var i=this.uiDialog.find(":tabbable"),s=i.filter(":first"),n=i.filter(":last");t.target!==n[0]&&t.target!==this.uiDialog[0]||t.shiftKey?t.target!==s[0]&&t.target!==this.uiDialog[0]||!t.shiftKey||(this._delay(function(){n.focus()}),t.preventDefault()):(this._delay(function(){s.focus()}),t.preventDefault())}},mousedown:function(e){this._moveToTop(e)&&this._focusTabbable()}}),this.element.find("[aria-describedby]").length||this.uiDialog.attr({"aria-describedby":this.element.uniqueId().attr("id")})},_createTitlebar:function(){var t;this.uiDialogTitlebar=e("
      ").addClass("ui-dialog-titlebar ui-widget-header ui-corner-all ui-helper-clearfix").prependTo(this.uiDialog),this._on(this.uiDialogTitlebar,{mousedown:function(t){e(t.target).closest(".ui-dialog-titlebar-close")||this.uiDialog.focus()}}),this.uiDialogTitlebarClose=e("").button({label:this.options.closeText,icons:{primary:"ui-icon-closethick"},text:!1}).addClass("ui-dialog-titlebar-close").appendTo(this.uiDialogTitlebar),this._on(this.uiDialogTitlebarClose,{click:function(e){e.preventDefault(),this.close(e)}}),t=e("").uniqueId().addClass("ui-dialog-title").prependTo(this.uiDialogTitlebar),this._title(t),this.uiDialog.attr({"aria-labelledby":t.attr("id")})},_title:function(e){this.options.title||e.html(" "),e.text(this.options.title)},_createButtonPane:function(){this.uiDialogButtonPane=e("
      ").addClass("ui-dialog-buttonpane ui-widget-content ui-helper-clearfix"),this.uiButtonSet=e("
      ").addClass("ui-dialog-buttonset").appendTo(this.uiDialogButtonPane),this._createButtons()},_createButtons:function(){var t=this,i=this.options.buttons;return this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),e.isEmptyObject(i)||e.isArray(i)&&!i.length?(this.uiDialog.removeClass("ui-dialog-buttons"),void 0):(e.each(i,function(i,s){var n,a;s=e.isFunction(s)?{click:s,text:i}:s,s=e.extend({type:"button"},s),n=s.click,s.click=function(){n.apply(t.element[0],arguments)},a={icons:s.icons,text:s.showText},delete s.icons,delete s.showText,e("",s).button(a).appendTo(t.uiButtonSet)}),this.uiDialog.addClass("ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog),void 0)},_makeDraggable:function(){function t(e){return{position:e.position,offset:e.offset}}var i=this,s=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(s,n){e(this).addClass("ui-dialog-dragging"),i._blockFrames(),i._trigger("dragStart",s,t(n))},drag:function(e,s){i._trigger("drag",e,t(s))},stop:function(n,a){var o=a.offset.left-i.document.scrollLeft(),r=a.offset.top-i.document.scrollTop();s.position={my:"left top",at:"left"+(o>=0?"+":"")+o+" "+"top"+(r>=0?"+":"")+r,of:i.window},e(this).removeClass("ui-dialog-dragging"),i._unblockFrames(),i._trigger("dragStop",n,t(a))}})},_makeResizable:function(){function t(e){return{originalPosition:e.originalPosition,originalSize:e.originalSize,position:e.position,size:e.size}}var i=this,s=this.options,n=s.resizable,a=this.uiDialog.css("position"),o="string"==typeof 
n?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:s.maxWidth,maxHeight:s.maxHeight,minWidth:s.minWidth,minHeight:this._minHeight(),handles:o,start:function(s,n){e(this).addClass("ui-dialog-resizing"),i._blockFrames(),i._trigger("resizeStart",s,t(n))},resize:function(e,s){i._trigger("resize",e,t(s))},stop:function(n,a){var o=i.uiDialog.offset(),r=o.left-i.document.scrollLeft(),h=o.top-i.document.scrollTop();s.height=i.uiDialog.height(),s.width=i.uiDialog.width(),s.position={my:"left top",at:"left"+(r>=0?"+":"")+r+" "+"top"+(h>=0?"+":"")+h,of:i.window},e(this).removeClass("ui-dialog-resizing"),i._unblockFrames(),i._trigger("resizeStop",n,t(a))}}).css("position",a)},_trackFocus:function(){this._on(this.widget(),{focusin:function(t){this._makeFocusTarget(),this._focusedElement=e(t.target)}})},_makeFocusTarget:function(){this._untrackInstance(),this._trackingInstances().unshift(this)},_untrackInstance:function(){var t=this._trackingInstances(),i=e.inArray(this,t);-1!==i&&t.splice(i,1)},_trackingInstances:function(){var e=this.document.data("ui-dialog-instances");return e||(e=[],this.document.data("ui-dialog-instances",e)),e},_minHeight:function(){var e=this.options;return"auto"===e.height?e.minHeight:Math.min(e.minHeight,e.height)},_position:function(){var e=this.uiDialog.is(":visible");e||this.uiDialog.show(),this.uiDialog.position(this.options.position),e||this.uiDialog.hide()},_setOptions:function(t){var i=this,s=!1,n={};e.each(t,function(e,t){i._setOption(e,t),e in i.sizeRelatedOptions&&(s=!0),e in i.resizableRelatedOptions&&(n[e]=t)}),s&&(this._size(),this._position()),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option",n)},_setOption:function(e,t){var 
i,s,n=this.uiDialog;"dialogClass"===e&&n.removeClass(this.options.dialogClass).addClass(t),"disabled"!==e&&(this._super(e,t),"appendTo"===e&&this.uiDialog.appendTo(this._appendTo()),"buttons"===e&&this._createButtons(),"closeText"===e&&this.uiDialogTitlebarClose.button({label:""+t}),"draggable"===e&&(i=n.is(":data(ui-draggable)"),i&&!t&&n.draggable("destroy"),!i&&t&&this._makeDraggable()),"position"===e&&this._position(),"resizable"===e&&(s=n.is(":data(ui-resizable)"),s&&!t&&n.resizable("destroy"),s&&"string"==typeof t&&n.resizable("option","handles",t),s||t===!1||this._makeResizable()),"title"===e&&this._title(this.uiDialogTitlebar.find(".ui-dialog-title")))},_size:function(){var e,t,i,s=this.options;this.element.show().css({width:"auto",minHeight:0,maxHeight:"none",height:0}),s.minWidth>s.width&&(s.width=s.minWidth),e=this.uiDialog.css({height:"auto",width:s.width}).outerHeight(),t=Math.max(0,s.minHeight-e),i="number"==typeof s.maxHeight?Math.max(0,s.maxHeight-e):"none","auto"===s.height?this.element.css({minHeight:t,maxHeight:i,height:"auto"}):this.element.height(Math.max(0,s.height-e)),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())},_blockFrames:function(){this.iframeBlocks=this.document.find("iframe").map(function(){var t=e(this);return e("
      ").css({position:"absolute",width:t.outerWidth(),height:t.outerHeight()}).appendTo(t.parent()).offset(t.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_allowInteraction:function(t){return e(t.target).closest(".ui-dialog").length?!0:!!e(t.target).closest(".ui-datepicker").length},_createOverlay:function(){if(this.options.modal){var t=!0;this._delay(function(){t=!1}),this.document.data("ui-dialog-overlays")||this._on(this.document,{focusin:function(e){t||this._allowInteraction(e)||(e.preventDefault(),this._trackingInstances()[0]._focusTabbable())}}),this.overlay=e("
      ").addClass("ui-widget-overlay ui-front").appendTo(this._appendTo()),this._on(this.overlay,{mousedown:"_keepFocus"}),this.document.data("ui-dialog-overlays",(this.document.data("ui-dialog-overlays")||0)+1)}},_destroyOverlay:function(){if(this.options.modal&&this.overlay){var e=this.document.data("ui-dialog-overlays")-1;e?this.document.data("ui-dialog-overlays",e):this.document.unbind("focusin").removeData("ui-dialog-overlays"),this.overlay.remove(),this.overlay=null}}}),e.widget("ui.droppable",{version:"1.11.4",widgetEventPrefix:"drop",options:{accept:"*",activeClass:!1,addClasses:!0,greedy:!1,hoverClass:!1,scope:"default",tolerance:"intersect",activate:null,deactivate:null,drop:null,out:null,over:null},_create:function(){var t,i=this.options,s=i.accept;this.isover=!1,this.isout=!0,this.accept=e.isFunction(s)?s:function(e){return e.is(s)},this.proportions=function(){return arguments.length?(t=arguments[0],void 0):t?t:t={width:this.element[0].offsetWidth,height:this.element[0].offsetHeight}},this._addToManager(i.scope),i.addClasses&&this.element.addClass("ui-droppable")},_addToManager:function(t){e.ui.ddmanager.droppables[t]=e.ui.ddmanager.droppables[t]||[],e.ui.ddmanager.droppables[t].push(this)},_splice:function(e){for(var t=0;e.length>t;t++)e[t]===this&&e.splice(t,1)},_destroy:function(){var t=e.ui.ddmanager.droppables[this.options.scope];this._splice(t),this.element.removeClass("ui-droppable ui-droppable-disabled")},_setOption:function(t,i){if("accept"===t)this.accept=e.isFunction(i)?i:function(e){return e.is(i)};else if("scope"===t){var s=e.ui.ddmanager.droppables[this.options.scope];this._splice(s),this._addToManager(i)}this._super(t,i)},_activate:function(t){var i=e.ui.ddmanager.current;this.options.activeClass&&this.element.addClass(this.options.activeClass),i&&this._trigger("activate",t,this.ui(i))},_deactivate:function(t){var 
i=e.ui.ddmanager.current;this.options.activeClass&&this.element.removeClass(this.options.activeClass),i&&this._trigger("deactivate",t,this.ui(i))},_over:function(t){var i=e.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this.options.hoverClass&&this.element.addClass(this.options.hoverClass),this._trigger("over",t,this.ui(i)))},_out:function(t){var i=e.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this.options.hoverClass&&this.element.removeClass(this.options.hoverClass),this._trigger("out",t,this.ui(i)))},_drop:function(t,i){var s=i||e.ui.ddmanager.current,n=!1;return s&&(s.currentItem||s.element)[0]!==this.element[0]?(this.element.find(":data(ui-droppable)").not(".ui-draggable-dragging").each(function(){var i=e(this).droppable("instance");return i.options.greedy&&!i.options.disabled&&i.options.scope===s.options.scope&&i.accept.call(i.element[0],s.currentItem||s.element)&&e.ui.intersect(s,e.extend(i,{offset:i.element.offset()}),i.options.tolerance,t)?(n=!0,!1):void 0}),n?!1:this.accept.call(this.element[0],s.currentItem||s.element)?(this.options.activeClass&&this.element.removeClass(this.options.activeClass),this.options.hoverClass&&this.element.removeClass(this.options.hoverClass),this._trigger("drop",t,this.ui(s)),this.element):!1):!1},ui:function(e){return{draggable:e.currentItem||e.element,helper:e.helper,position:e.position,offset:e.positionAbs}}}),e.ui.intersect=function(){function e(e,t,i){return e>=t&&t+i>e}return function(t,i,s,n){if(!i.offset)return!1;var a=(t.positionAbs||t.position.absolute).left+t.margins.left,o=(t.positionAbs||t.position.absolute).top+t.margins.top,r=a+t.helperProportions.width,h=o+t.helperProportions.height,l=i.offset.left,u=i.offset.top,d=l+i.proportions().width,c=u+i.proportions().height;switch(s){case"fit":return a>=l&&d>=r&&o>=u&&c>=h;case"intersect":return 
a+t.helperProportions.width/2>l&&d>r-t.helperProportions.width/2&&o+t.helperProportions.height/2>u&&c>h-t.helperProportions.height/2;case"pointer":return e(n.pageY,u,i.proportions().height)&&e(n.pageX,l,i.proportions().width);case"touch":return(o>=u&&c>=o||h>=u&&c>=h||u>o&&h>c)&&(a>=l&&d>=a||r>=l&&d>=r||l>a&&r>d);default:return!1}}}(),e.ui.ddmanager={current:null,droppables:{"default":[]},prepareOffsets:function(t,i){var s,n,a=e.ui.ddmanager.droppables[t.options.scope]||[],o=i?i.type:null,r=(t.currentItem||t.element).find(":data(ui-droppable)").addBack();e:for(s=0;a.length>s;s++)if(!(a[s].options.disabled||t&&!a[s].accept.call(a[s].element[0],t.currentItem||t.element))){for(n=0;r.length>n;n++)if(r[n]===a[s].element[0]){a[s].proportions().height=0;continue e}a[s].visible="none"!==a[s].element.css("display"),a[s].visible&&("mousedown"===o&&a[s]._activate.call(a[s],i),a[s].offset=a[s].element.offset(),a[s].proportions({width:a[s].element[0].offsetWidth,height:a[s].element[0].offsetHeight}))}},drop:function(t,i){var s=!1;return e.each((e.ui.ddmanager.droppables[t.options.scope]||[]).slice(),function(){this.options&&(!this.options.disabled&&this.visible&&e.ui.intersect(t,this,this.options.tolerance,i)&&(s=this._drop.call(this,i)||s),!this.options.disabled&&this.visible&&this.accept.call(this.element[0],t.currentItem||t.element)&&(this.isout=!0,this.isover=!1,this._deactivate.call(this,i)))}),s},dragStart:function(t,i){t.element.parentsUntil("body").bind("scroll.droppable",function(){t.options.refreshPositions||e.ui.ddmanager.prepareOffsets(t,i)})},drag:function(t,i){t.options.refreshPositions&&e.ui.ddmanager.prepareOffsets(t,i),e.each(e.ui.ddmanager.droppables[t.options.scope]||[],function(){if(!this.options.disabled&&!this.greedyChild&&this.visible){var 
s,n,a,o=e.ui.intersect(t,this,this.options.tolerance,i),r=!o&&this.isover?"isout":o&&!this.isover?"isover":null;r&&(this.options.greedy&&(n=this.options.scope,a=this.element.parents(":data(ui-droppable)").filter(function(){return e(this).droppable("instance").options.scope===n}),a.length&&(s=e(a[0]).droppable("instance"),s.greedyChild="isover"===r)),s&&"isover"===r&&(s.isover=!1,s.isout=!0,s._out.call(s,i)),this[r]=!0,this["isout"===r?"isover":"isout"]=!1,this["isover"===r?"_over":"_out"].call(this,i),s&&"isout"===r&&(s.isout=!1,s.isover=!0,s._over.call(s,i)))}})},dragStop:function(t,i){t.element.parentsUntil("body").unbind("scroll.droppable"),t.options.refreshPositions||e.ui.ddmanager.prepareOffsets(t,i)}},e.ui.droppable;var y="ui-effects-",b=e;e.effects={effect:{}},function(e,t){function i(e,t,i){var s=d[t.type]||{};return null==e?i||!t.def?null:t.def:(e=s.floor?~~e:parseFloat(e),isNaN(e)?t.def:s.mod?(e+s.mod)%s.mod:0>e?0:e>s.max?s.max:e)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(e,a){var o,r=a.re.exec(i),h=r&&a.parse(r),l=a.space||"rgba";return h?(o=s[l](h),s[u[l].cache]=o[u[l].cache],n=s._rgba=o._rgba,!1):t}),n.length?("0,0,0,0"===n.join()&&e.extend(n,a.transparent),s):a[i]}function n(e,t,i){return i=(i+1)%1,1>6*i?e+6*(t-e)*i:1>2*i?t:2>3*i?e+6*(t-e)*(2/3-i):e}var a,o="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor 
textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(e){return[e[1],e[2],e[3],e[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(e){return[2.55*e[1],2.55*e[2],2.55*e[3],e[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(e){return[parseInt(e[1],16),parseInt(e[2],16),parseInt(e[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(e){return[parseInt(e[1]+e[1],16),parseInt(e[2]+e[2],16),parseInt(e[3]+e[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(e){return[e[1],e[2]/100,e[3]/100,e[4]]}}],l=e.Color=function(t,i,s,n){return new e.Color.fn.parse(t,i,s,n)},u={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},d={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},c=l.support={},p=e("

      ")[0],f=e.each;p.style.cssText="background-color:rgba(1,1,1,.5)",c.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(u,function(e,t){t.cache="_"+e,t.props.alpha={idx:3,type:"percent",def:1}}),l.fn=e.extend(l.prototype,{parse:function(n,o,r,h){if(n===t)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=e(n).css(o),o=t);var d=this,c=e.type(n),p=this._rgba=[];return o!==t&&(n=[n,o,r,h],c="array"),"string"===c?this.parse(s(n)||a._default):"array"===c?(f(u.rgba.props,function(e,t){p[t.idx]=i(n[t.idx],t)}),this):"object"===c?(n instanceof l?f(u,function(e,t){n[t.cache]&&(d[t.cache]=n[t.cache].slice())}):f(u,function(t,s){var a=s.cache;f(s.props,function(e,t){if(!d[a]&&s.to){if("alpha"===e||null==n[e])return;d[a]=s.to(d._rgba)}d[a][t.idx]=i(n[e],t,!0)}),d[a]&&0>e.inArray(null,d[a].slice(0,3))&&(d[a][3]=1,s.from&&(d._rgba=s.from(d[a])))}),this):t},is:function(e){var i=l(e),s=!0,n=this;return f(u,function(e,a){var o,r=i[a.cache];return r&&(o=n[a.cache]||a.to&&a.to(n._rgba)||[],f(a.props,function(e,i){return null!=r[i.idx]?s=r[i.idx]===o[i.idx]:t})),s}),s},_space:function(){var e=[],t=this;return f(u,function(i,s){t[s.cache]&&e.push(i)}),e.pop()},transition:function(e,t){var s=l(e),n=s._space(),a=u[n],o=0===this.alpha()?l("transparent"):this,r=o[a.cache]||a.to(o._rgba),h=r.slice();return s=s[a.cache],f(a.props,function(e,n){var a=n.idx,o=r[a],l=s[a],u=d[n.type]||{};null!==l&&(null===o?h[a]=l:(u.mod&&(l-o>u.mod/2?o+=u.mod:o-l>u.mod/2&&(o-=u.mod)),h[a]=i((l-o)*t+o,n)))}),this[n](h)},blend:function(t){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(t)._rgba;return l(e.map(i,function(e,t){return(1-s)*n[t]+s*e}))},toRgbaString:function(){var t="rgba(",i=e.map(this._rgba,function(e,t){return null==e?t>2?1:0:e});return 1===i[3]&&(i.pop(),t="rgb("),t+i.join()+")"},toHslaString:function(){var t="hsla(",i=e.map(this.hsla(),function(e,t){return null==e&&(e=t>2?1:0),t&&3>t&&(e=Math.round(100*e)+"%"),e});return 
1===i[3]&&(i.pop(),t="hsl("),t+i.join()+")"},toHexString:function(t){var i=this._rgba.slice(),s=i.pop();return t&&i.push(~~(255*s)),"#"+e.map(i,function(e){return e=(e||0).toString(16),1===e.length?"0"+e:e}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,u.hsla.to=function(e){if(null==e[0]||null==e[1]||null==e[2])return[null,null,null,e[3]];var t,i,s=e[0]/255,n=e[1]/255,a=e[2]/255,o=e[3],r=Math.max(s,n,a),h=Math.min(s,n,a),l=r-h,u=r+h,d=.5*u;return t=h===r?0:s===r?60*(n-a)/l+360:n===r?60*(a-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=d?l/u:l/(2-u),[Math.round(t)%360,i,d,null==o?1:o]},u.hsla.from=function(e){if(null==e[0]||null==e[1]||null==e[2])return[null,null,null,e[3]];var t=e[0]/360,i=e[1],s=e[2],a=e[3],o=.5>=s?s*(1+i):s+i-s*i,r=2*s-o;return[Math.round(255*n(r,o,t+1/3)),Math.round(255*n(r,o,t)),Math.round(255*n(r,o,t-1/3)),a]},f(u,function(s,n){var a=n.props,o=n.cache,h=n.to,u=n.from;l.fn[s]=function(s){if(h&&!this[o]&&(this[o]=h(this._rgba)),s===t)return this[o].slice();var n,r=e.type(s),d="array"===r||"object"===r?s:arguments,c=this[o].slice();return f(a,function(e,t){var s=d["object"===r?e:t.idx];null==s&&(s=c[t.idx]),c[t.idx]=i(s,t)}),u?(n=l(u(c)),n[o]=c,n):l(c)},f(a,function(t,i){l.fn[t]||(l.fn[t]=function(n){var a,o=e.type(n),h="alpha"===t?this._hsla?"hsla":"rgba":s,l=this[h](),u=l[i.idx];return"undefined"===o?u:("function"===o&&(n=n.call(this,u),o=e.type(n)),null==n&&i.empty?this:("string"===o&&(a=r.exec(n),a&&(n=u+parseFloat(a[2])*("+"===a[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(t){var i=t.split(" ");f(i,function(t,i){e.cssHooks[i]={set:function(t,n){var 
a,o,r="";if("transparent"!==n&&("string"!==e.type(n)||(a=s(n)))){if(n=l(a||n),!c.rgba&&1!==n._rgba[3]){for(o="backgroundColor"===i?t.parentNode:t;(""===r||"transparent"===r)&&o&&o.style;)try{r=e.css(o,"backgroundColor"),o=o.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{t.style[i]=n}catch(h){}}},e.fx.step[i]=function(t){t.colorInit||(t.start=l(t.elem,i),t.end=l(t.end),t.colorInit=!0),e.cssHooks[i].set(t.elem,t.start.transition(t.end,t.pos))}})},l.hook(o),e.cssHooks.borderColor={expand:function(e){var t={};return f(["Top","Right","Bottom","Left"],function(i,s){t["border"+s+"Color"]=e}),t}},a=e.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(b),function(){function t(t){var i,s,n=t.ownerDocument.defaultView?t.ownerDocument.defaultView.getComputedStyle(t,null):t.currentStyle,a={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(a[e.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(a[i]=n[i]);return a}function i(t,i){var s,a,o={};for(s in i)a=i[s],t[s]!==a&&(n[s]||(e.fx.step[s]||!isNaN(parseFloat(a)))&&(o[s]=a));return o}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};e.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(t,i){e.fx.step[i]=function(e){("none"!==e.end&&!e.setAttr||1===e.pos&&!e.setAttr)&&(b.style(e.elem,i,e.end),e.setAttr=!0)}}),e.fn.addBack||(e.fn.addBack=function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}),e.effects.animateClass=function(n,a,o,r){var h=e.speed(a,o,r);return this.queue(function(){var 
a,o=e(this),r=o.attr("class")||"",l=h.children?o.find("*").addBack():o;l=l.map(function(){var i=e(this);return{el:i,start:t(this)}}),a=function(){e.each(s,function(e,t){n[t]&&o[t+"Class"](n[t])})},a(),l=l.map(function(){return this.end=t(this.el[0]),this.diff=i(this.start,this.end),this}),o.attr("class",r),l=l.map(function(){var t=this,i=e.Deferred(),s=e.extend({},h,{queue:!1,complete:function(){i.resolve(t)}});return this.el.animate(this.diff,s),i.promise()}),e.when.apply(e,l.get()).done(function(){a(),e.each(arguments,function(){var t=this.el;e.each(this.diff,function(e){t.css(e,"")})}),h.complete.call(o[0])})})},e.fn.extend({addClass:function(t){return function(i,s,n,a){return s?e.effects.animateClass.call(this,{add:i},s,n,a):t.apply(this,arguments)}}(e.fn.addClass),removeClass:function(t){return function(i,s,n,a){return arguments.length>1?e.effects.animateClass.call(this,{remove:i},s,n,a):t.apply(this,arguments)}}(e.fn.removeClass),toggleClass:function(t){return function(i,s,n,a,o){return"boolean"==typeof s||void 0===s?n?e.effects.animateClass.call(this,s?{add:i}:{remove:i},n,a,o):t.apply(this,arguments):e.effects.animateClass.call(this,{toggle:i},s,n,a)}}(e.fn.toggleClass),switchClass:function(t,i,s,n,a){return e.effects.animateClass.call(this,{add:i,remove:t},s,n,a)}})}(),function(){function t(t,i,s,n){return e.isPlainObject(t)&&(i=t,t=t.effect),t={effect:t},null==i&&(i={}),e.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||e.fx.speeds[i])&&(n=s,s=i,i={}),e.isFunction(s)&&(n=s,s=null),i&&e.extend(t,i),s=s||i.duration,t.duration=e.fx.off?0:"number"==typeof s?s:s in e.fx.speeds?e.fx.speeds[s]:e.fx.speeds._default,t.complete=n||i.complete,t}function i(t){return!t||"number"==typeof t||e.fx.speeds[t]?!0:"string"!=typeof t||e.effects.effect[t]?e.isFunction(t)?!0:"object"!=typeof t||t.effect?!1:!0:!0}e.extend(e.effects,{version:"1.11.4",save:function(e,t){for(var i=0;t.length>i;i++)null!==t[i]&&e.data(y+t[i],e[0].style[t[i]])},restore:function(e,t){var 
i,s;for(s=0;t.length>s;s++)null!==t[s]&&(i=e.data(y+t[s]),void 0===i&&(i=""),e.css(t[s],i))},setMode:function(e,t){return"toggle"===t&&(t=e.is(":hidden")?"show":"hide"),t},getBaseline:function(e,t){var i,s;switch(e[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=e[0]/t.height}switch(e[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=e[1]/t.width}return{x:s,y:i}},createWrapper:function(t){if(t.parent().is(".ui-effects-wrapper"))return t.parent();var i={width:t.outerWidth(!0),height:t.outerHeight(!0),"float":t.css("float")},s=e("

      ").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:t.width(),height:t.height()},a=document.activeElement;try{a.id}catch(o){a=document.body}return t.wrap(s),(t[0]===a||e.contains(t[0],a))&&e(a).focus(),s=t.parent(),"static"===t.css("position")?(s.css({position:"relative"}),t.css({position:"relative"})):(e.extend(i,{position:t.css("position"),zIndex:t.css("z-index")}),e.each(["top","left","bottom","right"],function(e,s){i[s]=t.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),t.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),t.css(n),s.css(i).show()},removeWrapper:function(t){var i=document.activeElement;return t.parent().is(".ui-effects-wrapper")&&(t.parent().replaceWith(t),(t[0]===i||e.contains(t[0],i))&&e(i).focus()),t},setTransition:function(t,i,s,n){return n=n||{},e.each(i,function(e,i){var a=t.cssUnit(i);a[0]>0&&(n[i]=a[0]*s+a[1])}),n}}),e.fn.extend({effect:function(){function i(t){function i(){e.isFunction(a)&&a.call(n[0]),e.isFunction(t)&&t()}var n=e(this),a=s.complete,r=s.mode;(n.is(":hidden")?"hide"===r:"show"===r)?(n[r](),i()):o.call(n[0],s,i)}var s=t.apply(this,arguments),n=s.mode,a=s.queue,o=e.effects.effect[s.effect];return e.fx.off||!o?n?this[n](s.duration,s.complete):this.each(function(){s.complete&&s.complete.call(this)}):a===!1?this.each(i):this.queue(a||"fx",i)},show:function(e){return function(s){if(i(s))return e.apply(this,arguments);var n=t.apply(this,arguments);return n.mode="show",this.effect.call(this,n)}}(e.fn.show),hide:function(e){return function(s){if(i(s))return e.apply(this,arguments);var n=t.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(e.fn.hide),toggle:function(e){return function(s){if(i(s)||"boolean"==typeof s)return e.apply(this,arguments);var n=t.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(e.fn.toggle),cssUnit:function(t){var i=this.css(t),s=[];return 
e.each(["em","px","%","pt"],function(e,t){i.indexOf(t)>0&&(s=[parseFloat(i),t])}),s}})}(),function(){var t={};e.each(["Quad","Cubic","Quart","Quint","Expo"],function(e,i){t[i]=function(t){return Math.pow(t,e+2)}}),e.extend(t,{Sine:function(e){return 1-Math.cos(e*Math.PI/2)},Circ:function(e){return 1-Math.sqrt(1-e*e)},Elastic:function(e){return 0===e||1===e?e:-Math.pow(2,8*(e-1))*Math.sin((80*(e-1)-7.5)*Math.PI/15)},Back:function(e){return e*e*(3*e-2)},Bounce:function(e){for(var t,i=4;((t=Math.pow(2,--i))-1)/11>e;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*t-2)/22-e,2)}}),e.each(t,function(t,i){e.easing["easeIn"+t]=i,e.easing["easeOut"+t]=function(e){return 1-i(1-e)},e.easing["easeInOut"+t]=function(e){return.5>e?i(2*e)/2:1-i(-2*e+2)/2}})}(),e.effects,e.effects.effect.blind=function(t,i){var s,n,a,o=e(this),r=/up|down|vertical/,h=/up|left|vertical|horizontal/,l=["position","top","bottom","left","right","height","width"],u=e.effects.setMode(o,t.mode||"hide"),d=t.direction||"up",c=r.test(d),p=c?"height":"width",f=c?"top":"left",m=h.test(d),g={},v="show"===u;o.parent().is(".ui-effects-wrapper")?e.effects.save(o.parent(),l):e.effects.save(o,l),o.show(),s=e.effects.createWrapper(o).css({overflow:"hidden"}),n=s[p](),a=parseFloat(s.css(f))||0,g[p]=v?n:0,m||(o.css(c?"bottom":"right",0).css(c?"top":"left","auto").css({position:"absolute"}),g[f]=v?a:n+a),v&&(s.css(p,0),m||s.css(f,a+n)),s.animate(g,{duration:t.duration,easing:t.easing,queue:!1,complete:function(){"hide"===u&&o.hide(),e.effects.restore(o,l),e.effects.removeWrapper(o),i()}})},e.effects.effect.bounce=function(t,i){var 
s,n,a,o=e(this),r=["position","top","bottom","left","right","height","width"],h=e.effects.setMode(o,t.mode||"effect"),l="hide"===h,u="show"===h,d=t.direction||"up",c=t.distance,p=t.times||5,f=2*p+(u||l?1:0),m=t.duration/f,g=t.easing,v="up"===d||"down"===d?"top":"left",y="up"===d||"left"===d,b=o.queue(),_=b.length;for((u||l)&&r.push("opacity"),e.effects.save(o,r),o.show(),e.effects.createWrapper(o),c||(c=o["top"===v?"outerHeight":"outerWidth"]()/3),u&&(a={opacity:1},a[v]=0,o.css("opacity",0).css(v,y?2*-c:2*c).animate(a,m,g)),l&&(c/=Math.pow(2,p-1)),a={},a[v]=0,s=0;p>s;s++)n={},n[v]=(y?"-=":"+=")+c,o.animate(n,m,g).animate(a,m,g),c=l?2*c:c/2;l&&(n={opacity:0},n[v]=(y?"-=":"+=")+c,o.animate(n,m,g)),o.queue(function(){l&&o.hide(),e.effects.restore(o,r),e.effects.removeWrapper(o),i()}),_>1&&b.splice.apply(b,[1,0].concat(b.splice(_,f+1))),o.dequeue()},e.effects.effect.clip=function(t,i){var s,n,a,o=e(this),r=["position","top","bottom","left","right","height","width"],h=e.effects.setMode(o,t.mode||"hide"),l="show"===h,u=t.direction||"vertical",d="vertical"===u,c=d?"height":"width",p=d?"top":"left",f={};e.effects.save(o,r),o.show(),s=e.effects.createWrapper(o).css({overflow:"hidden"}),n="IMG"===o[0].tagName?s:o,a=n[c](),l&&(n.css(c,0),n.css(p,a/2)),f[c]=l?a:0,f[p]=l?0:a/2,n.animate(f,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){l||o.hide(),e.effects.restore(o,r),e.effects.removeWrapper(o),i()}})},e.effects.effect.drop=function(t,i){var 
s,n=e(this),a=["position","top","bottom","left","right","opacity","height","width"],o=e.effects.setMode(n,t.mode||"hide"),r="show"===o,h=t.direction||"left",l="up"===h||"down"===h?"top":"left",u="up"===h||"left"===h?"pos":"neg",d={opacity:r?1:0};e.effects.save(n,a),n.show(),e.effects.createWrapper(n),s=t.distance||n["top"===l?"outerHeight":"outerWidth"](!0)/2,r&&n.css("opacity",0).css(l,"pos"===u?-s:s),d[l]=(r?"pos"===u?"+=":"-=":"pos"===u?"-=":"+=")+s,n.animate(d,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){"hide"===o&&n.hide(),e.effects.restore(n,a),e.effects.removeWrapper(n),i()}})},e.effects.effect.explode=function(t,i){function s(){b.push(this),b.length===d*c&&n()}function n(){p.css({visibility:"visible"}),e(b).remove(),m||p.hide(),i()}var a,o,r,h,l,u,d=t.pieces?Math.round(Math.sqrt(t.pieces)):3,c=d,p=e(this),f=e.effects.setMode(p,t.mode||"hide"),m="show"===f,g=p.show().css("visibility","hidden").offset(),v=Math.ceil(p.outerWidth()/c),y=Math.ceil(p.outerHeight()/d),b=[];for(a=0;d>a;a++)for(h=g.top+a*y,u=a-(d-1)/2,o=0;c>o;o++)r=g.left+o*v,l=o-(c-1)/2,p.clone().appendTo("body").wrap("
      ").css({position:"absolute",visibility:"visible",left:-o*v,top:-a*y}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:v,height:y,left:r+(m?l*v:0),top:h+(m?u*y:0),opacity:m?0:1}).animate({left:r+(m?0:l*v),top:h+(m?0:u*y),opacity:m?1:0},t.duration||500,t.easing,s)},e.effects.effect.fade=function(t,i){var s=e(this),n=e.effects.setMode(s,t.mode||"toggle");s.animate({opacity:n},{queue:!1,duration:t.duration,easing:t.easing,complete:i})},e.effects.effect.fold=function(t,i){var s,n,a=e(this),o=["position","top","bottom","left","right","height","width"],r=e.effects.setMode(a,t.mode||"hide"),h="show"===r,l="hide"===r,u=t.size||15,d=/([0-9]+)%/.exec(u),c=!!t.horizFirst,p=h!==c,f=p?["width","height"]:["height","width"],m=t.duration/2,g={},v={};e.effects.save(a,o),a.show(),s=e.effects.createWrapper(a).css({overflow:"hidden"}),n=p?[s.width(),s.height()]:[s.height(),s.width()],d&&(u=parseInt(d[1],10)/100*n[l?0:1]),h&&s.css(c?{height:0,width:u}:{height:u,width:0}),g[f[0]]=h?n[0]:u,v[f[1]]=h?n[1]:0,s.animate(g,m,t.easing).animate(v,m,t.easing,function(){l&&a.hide(),e.effects.restore(a,o),e.effects.removeWrapper(a),i()})},e.effects.effect.highlight=function(t,i){var s=e(this),n=["backgroundImage","backgroundColor","opacity"],a=e.effects.setMode(s,t.mode||"show"),o={backgroundColor:s.css("backgroundColor")};"hide"===a&&(o.opacity=0),e.effects.save(s,n),s.show().css({backgroundImage:"none",backgroundColor:t.color||"#ffff99"}).animate(o,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){"hide"===a&&s.hide(),e.effects.restore(s,n),i()}})},e.effects.effect.size=function(t,i){var 
s,n,a,o=e(this),r=["position","top","bottom","left","right","width","height","overflow","opacity"],h=["position","top","bottom","left","right","overflow","opacity"],l=["width","height","overflow"],u=["fontSize"],d=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],c=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],p=e.effects.setMode(o,t.mode||"effect"),f=t.restore||"effect"!==p,m=t.scale||"both",g=t.origin||["middle","center"],v=o.css("position"),y=f?r:h,b={height:0,width:0,outerHeight:0,outerWidth:0};"show"===p&&o.show(),s={height:o.height(),width:o.width(),outerHeight:o.outerHeight(),outerWidth:o.outerWidth()},"toggle"===t.mode&&"show"===p?(o.from=t.to||b,o.to=t.from||s):(o.from=t.from||("show"===p?b:s),o.to=t.to||("hide"===p?b:s)),a={from:{y:o.from.height/s.height,x:o.from.width/s.width},to:{y:o.to.height/s.height,x:o.to.width/s.width}},("box"===m||"both"===m)&&(a.from.y!==a.to.y&&(y=y.concat(d),o.from=e.effects.setTransition(o,d,a.from.y,o.from),o.to=e.effects.setTransition(o,d,a.to.y,o.to)),a.from.x!==a.to.x&&(y=y.concat(c),o.from=e.effects.setTransition(o,c,a.from.x,o.from),o.to=e.effects.setTransition(o,c,a.to.x,o.to))),("content"===m||"both"===m)&&a.from.y!==a.to.y&&(y=y.concat(u).concat(l),o.from=e.effects.setTransition(o,u,a.from.y,o.from),o.to=e.effects.setTransition(o,u,a.to.y,o.to)),e.effects.save(o,y),o.show(),e.effects.createWrapper(o),o.css("overflow","hidden").css(o.from),g&&(n=e.effects.getBaseline(g,s),o.from.top=(s.outerHeight-o.outerHeight())*n.y,o.from.left=(s.outerWidth-o.outerWidth())*n.x,o.to.top=(s.outerHeight-o.to.outerHeight)*n.y,o.to.left=(s.outerWidth-o.to.outerWidth)*n.x),o.css(o.from),("content"===m||"both"===m)&&(d=d.concat(["marginTop","marginBottom"]).concat(u),c=c.concat(["marginLeft","marginRight"]),l=r.concat(d).concat(c),o.find("*[width]").each(function(){var i=e(this),s={height:i.height(),width:i.width(),outerHeight:i.outerHeight(),outerWidth:i.outerWidth()}; 
+f&&e.effects.save(i,l),i.from={height:s.height*a.from.y,width:s.width*a.from.x,outerHeight:s.outerHeight*a.from.y,outerWidth:s.outerWidth*a.from.x},i.to={height:s.height*a.to.y,width:s.width*a.to.x,outerHeight:s.height*a.to.y,outerWidth:s.width*a.to.x},a.from.y!==a.to.y&&(i.from=e.effects.setTransition(i,d,a.from.y,i.from),i.to=e.effects.setTransition(i,d,a.to.y,i.to)),a.from.x!==a.to.x&&(i.from=e.effects.setTransition(i,c,a.from.x,i.from),i.to=e.effects.setTransition(i,c,a.to.x,i.to)),i.css(i.from),i.animate(i.to,t.duration,t.easing,function(){f&&e.effects.restore(i,l)})})),o.animate(o.to,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){0===o.to.opacity&&o.css("opacity",o.from.opacity),"hide"===p&&o.hide(),e.effects.restore(o,y),f||("static"===v?o.css({position:"relative",top:o.to.top,left:o.to.left}):e.each(["top","left"],function(e,t){o.css(t,function(t,i){var s=parseInt(i,10),n=e?o.to.left:o.to.top;return"auto"===i?n+"px":s+n+"px"})})),e.effects.removeWrapper(o),i()}})},e.effects.effect.scale=function(t,i){var s=e(this),n=e.extend(!0,{},t),a=e.effects.setMode(s,t.mode||"effect"),o=parseInt(t.percent,10)||(0===parseInt(t.percent,10)?0:"hide"===a?0:100),r=t.direction||"both",h=t.origin,l={height:s.height(),width:s.width(),outerHeight:s.outerHeight(),outerWidth:s.outerWidth()},u={y:"horizontal"!==r?o/100:1,x:"vertical"!==r?o/100:1};n.effect="size",n.queue=!1,n.complete=i,"effect"!==a&&(n.origin=h||["middle","center"],n.restore=!0),n.from=t.from||("show"===a?{height:0,width:0,outerHeight:0,outerWidth:0}:l),n.to={height:l.height*u.y,width:l.width*u.x,outerHeight:l.outerHeight*u.y,outerWidth:l.outerWidth*u.x},n.fade&&("show"===a&&(n.from.opacity=0,n.to.opacity=1),"hide"===a&&(n.from.opacity=1,n.to.opacity=0)),s.effect(n)},e.effects.effect.puff=function(t,i){var 
s=e(this),n=e.effects.setMode(s,t.mode||"hide"),a="hide"===n,o=parseInt(t.percent,10)||150,r=o/100,h={height:s.height(),width:s.width(),outerHeight:s.outerHeight(),outerWidth:s.outerWidth()};e.extend(t,{effect:"scale",queue:!1,fade:!0,mode:n,complete:i,percent:a?o:100,from:a?h:{height:h.height*r,width:h.width*r,outerHeight:h.outerHeight*r,outerWidth:h.outerWidth*r}}),s.effect(t)},e.effects.effect.pulsate=function(t,i){var s,n=e(this),a=e.effects.setMode(n,t.mode||"show"),o="show"===a,r="hide"===a,h=o||"hide"===a,l=2*(t.times||5)+(h?1:0),u=t.duration/l,d=0,c=n.queue(),p=c.length;for((o||!n.is(":visible"))&&(n.css("opacity",0).show(),d=1),s=1;l>s;s++)n.animate({opacity:d},u,t.easing),d=1-d;n.animate({opacity:d},u,t.easing),n.queue(function(){r&&n.hide(),i()}),p>1&&c.splice.apply(c,[1,0].concat(c.splice(p,l+1))),n.dequeue()},e.effects.effect.shake=function(t,i){var s,n=e(this),a=["position","top","bottom","left","right","height","width"],o=e.effects.setMode(n,t.mode||"effect"),r=t.direction||"left",h=t.distance||20,l=t.times||3,u=2*l+1,d=Math.round(t.duration/u),c="up"===r||"down"===r?"top":"left",p="up"===r||"left"===r,f={},m={},g={},v=n.queue(),y=v.length;for(e.effects.save(n,a),n.show(),e.effects.createWrapper(n),f[c]=(p?"-=":"+=")+h,m[c]=(p?"+=":"-=")+2*h,g[c]=(p?"-=":"+=")+2*h,n.animate(f,d,t.easing),s=1;l>s;s++)n.animate(m,d,t.easing).animate(g,d,t.easing);n.animate(m,d,t.easing).animate(f,d/2,t.easing).queue(function(){"hide"===o&&n.hide(),e.effects.restore(n,a),e.effects.removeWrapper(n),i()}),y>1&&v.splice.apply(v,[1,0].concat(v.splice(y,u+1))),n.dequeue()},e.effects.effect.slide=function(t,i){var 
s,n=e(this),a=["position","top","bottom","left","right","width","height"],o=e.effects.setMode(n,t.mode||"show"),r="show"===o,h=t.direction||"left",l="up"===h||"down"===h?"top":"left",u="up"===h||"left"===h,d={};e.effects.save(n,a),n.show(),s=t.distance||n["top"===l?"outerHeight":"outerWidth"](!0),e.effects.createWrapper(n).css({overflow:"hidden"}),r&&n.css(l,u?isNaN(s)?"-"+s:-s:s),d[l]=(r?u?"+=":"-=":u?"-=":"+=")+s,n.animate(d,{queue:!1,duration:t.duration,easing:t.easing,complete:function(){"hide"===o&&n.hide(),e.effects.restore(n,a),e.effects.removeWrapper(n),i()}})},e.effects.effect.transfer=function(t,i){var s=e(this),n=e(t.to),a="fixed"===n.css("position"),o=e("body"),r=a?o.scrollTop():0,h=a?o.scrollLeft():0,l=n.offset(),u={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},d=s.offset(),c=e("
      ").appendTo(document.body).addClass(t.className).css({top:d.top-r,left:d.left-h,height:s.innerHeight(),width:s.innerWidth(),position:a?"fixed":"absolute"}).animate(u,t.duration,t.easing,function(){c.remove(),i()})},e.widget("ui.progressbar",{version:"1.11.4",options:{max:100,value:0,change:null,complete:null},min:0,_create:function(){this.oldValue=this.options.value=this._constrainedValue(),this.element.addClass("ui-progressbar ui-widget ui-widget-content ui-corner-all").attr({role:"progressbar","aria-valuemin":this.min}),this.valueDiv=e("
      ").appendTo(this.element),this._refreshValue()},_destroy:function(){this.element.removeClass("ui-progressbar ui-widget ui-widget-content ui-corner-all").removeAttr("role").removeAttr("aria-valuemin").removeAttr("aria-valuemax").removeAttr("aria-valuenow"),this.valueDiv.remove()},value:function(e){return void 0===e?this.options.value:(this.options.value=this._constrainedValue(e),this._refreshValue(),void 0)},_constrainedValue:function(e){return void 0===e&&(e=this.options.value),this.indeterminate=e===!1,"number"!=typeof e&&(e=0),this.indeterminate?!1:Math.min(this.options.max,Math.max(this.min,e))},_setOptions:function(e){var t=e.value;delete e.value,this._super(e),this.options.value=this._constrainedValue(t),this._refreshValue()},_setOption:function(e,t){"max"===e&&(t=Math.max(this.min,t)),"disabled"===e&&this.element.toggleClass("ui-state-disabled",!!t).attr("aria-disabled",t),this._super(e,t)},_percentage:function(){return this.indeterminate?100:100*(this.options.value-this.min)/(this.options.max-this.min)},_refreshValue:function(){var t=this.options.value,i=this._percentage();this.valueDiv.toggle(this.indeterminate||t>this.min).toggleClass("ui-corner-right",t===this.options.max).width(i.toFixed(0)+"%"),this.element.toggleClass("ui-progressbar-indeterminate",this.indeterminate),this.indeterminate?(this.element.removeAttr("aria-valuenow"),this.overlayDiv||(this.overlayDiv=e("
      ").appendTo(this.valueDiv))):(this.element.attr({"aria-valuemax":this.options.max,"aria-valuenow":t}),this.overlayDiv&&(this.overlayDiv.remove(),this.overlayDiv=null)),this.oldValue!==t&&(this.oldValue=t,this._trigger("change")),t===this.options.max&&this._trigger("complete")}}),e.widget("ui.selectable",e.ui.mouse,{version:"1.11.4",options:{appendTo:"body",autoRefresh:!0,distance:0,filter:"*",tolerance:"touch",selected:null,selecting:null,start:null,stop:null,unselected:null,unselecting:null},_create:function(){var t,i=this;this.element.addClass("ui-selectable"),this.dragged=!1,this.refresh=function(){t=e(i.options.filter,i.element[0]),t.addClass("ui-selectee"),t.each(function(){var t=e(this),i=t.offset();e.data(this,"selectable-item",{element:this,$element:t,left:i.left,top:i.top,right:i.left+t.outerWidth(),bottom:i.top+t.outerHeight(),startselected:!1,selected:t.hasClass("ui-selected"),selecting:t.hasClass("ui-selecting"),unselecting:t.hasClass("ui-unselecting")})})},this.refresh(),this.selectees=t.addClass("ui-selectee"),this._mouseInit(),this.helper=e("
      ")},_destroy:function(){this.selectees.removeClass("ui-selectee").removeData("selectable-item"),this.element.removeClass("ui-selectable ui-selectable-disabled"),this._mouseDestroy()},_mouseStart:function(t){var i=this,s=this.options;this.opos=[t.pageX,t.pageY],this.options.disabled||(this.selectees=e(s.filter,this.element[0]),this._trigger("start",t),e(s.appendTo).append(this.helper),this.helper.css({left:t.pageX,top:t.pageY,width:0,height:0}),s.autoRefresh&&this.refresh(),this.selectees.filter(".ui-selected").each(function(){var s=e.data(this,"selectable-item");s.startselected=!0,t.metaKey||t.ctrlKey||(s.$element.removeClass("ui-selected"),s.selected=!1,s.$element.addClass("ui-unselecting"),s.unselecting=!0,i._trigger("unselecting",t,{unselecting:s.element}))}),e(t.target).parents().addBack().each(function(){var s,n=e.data(this,"selectable-item");return n?(s=!t.metaKey&&!t.ctrlKey||!n.$element.hasClass("ui-selected"),n.$element.removeClass(s?"ui-unselecting":"ui-selected").addClass(s?"ui-selecting":"ui-unselecting"),n.unselecting=!s,n.selecting=s,n.selected=s,s?i._trigger("selecting",t,{selecting:n.element}):i._trigger("unselecting",t,{unselecting:n.element}),!1):void 0}))},_mouseDrag:function(t){if(this.dragged=!0,!this.options.disabled){var i,s=this,n=this.options,a=this.opos[0],o=this.opos[1],r=t.pageX,h=t.pageY;return a>r&&(i=r,r=a,a=i),o>h&&(i=h,h=o,o=i),this.helper.css({left:a,top:o,width:r-a,height:h-o}),this.selectees.each(function(){var 
i=e.data(this,"selectable-item"),l=!1;i&&i.element!==s.element[0]&&("touch"===n.tolerance?l=!(i.left>r||a>i.right||i.top>h||o>i.bottom):"fit"===n.tolerance&&(l=i.left>a&&r>i.right&&i.top>o&&h>i.bottom),l?(i.selected&&(i.$element.removeClass("ui-selected"),i.selected=!1),i.unselecting&&(i.$element.removeClass("ui-unselecting"),i.unselecting=!1),i.selecting||(i.$element.addClass("ui-selecting"),i.selecting=!0,s._trigger("selecting",t,{selecting:i.element}))):(i.selecting&&((t.metaKey||t.ctrlKey)&&i.startselected?(i.$element.removeClass("ui-selecting"),i.selecting=!1,i.$element.addClass("ui-selected"),i.selected=!0):(i.$element.removeClass("ui-selecting"),i.selecting=!1,i.startselected&&(i.$element.addClass("ui-unselecting"),i.unselecting=!0),s._trigger("unselecting",t,{unselecting:i.element}))),i.selected&&(t.metaKey||t.ctrlKey||i.startselected||(i.$element.removeClass("ui-selected"),i.selected=!1,i.$element.addClass("ui-unselecting"),i.unselecting=!0,s._trigger("unselecting",t,{unselecting:i.element})))))}),!1}},_mouseStop:function(t){var i=this;return this.dragged=!1,e(".ui-unselecting",this.element[0]).each(function(){var s=e.data(this,"selectable-item");s.$element.removeClass("ui-unselecting"),s.unselecting=!1,s.startselected=!1,i._trigger("unselected",t,{unselected:s.element})}),e(".ui-selecting",this.element[0]).each(function(){var 
s=e.data(this,"selectable-item");s.$element.removeClass("ui-selecting").addClass("ui-selected"),s.selecting=!1,s.selected=!0,s.startselected=!0,i._trigger("selected",t,{selected:s.element})}),this._trigger("stop",t),this.helper.remove(),!1}}),e.widget("ui.selectmenu",{version:"1.11.4",defaultElement:"",widgetEventPrefix:"spin",options:{culture:null,icons:{down:"ui-icon-triangle-1-s",up:"ui-icon-triangle-1-n"},incremental:!0,max:null,min:null,numberFormat:null,page:10,step:1,change:null,spin:null,start:null,stop:null},_create:function(){this._setOption("max",this.options.max),this._setOption("min",this.options.min),this._setOption("step",this.options.step),""!==this.value()&&this._value(this.element.val(),!0),this._draw(),this._on(this._events),this._refresh(),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_getCreateOptions:function(){var t={},i=this.element;return e.each(["min","max","step"],function(e,s){var n=i.attr(s);void 0!==n&&n.length&&(t[s]=n)}),t},_events:{keydown:function(e){this._start(e)&&this._keydown(e)&&e.preventDefault()},keyup:"_stop",focus:function(){this.previous=this.element.val()},blur:function(e){return this.cancelBlur?(delete this.cancelBlur,void 0):(this._stop(),this._refresh(),this.previous!==this.element.val()&&this._trigger("change",e),void 0)},mousewheel:function(e,t){if(t){if(!this.spinning&&!this._start(e))return!1;this._spin((t>0?1:-1)*this.options.step,e),clearTimeout(this.mousewheelTimer),this.mousewheelTimer=this._delay(function(){this.spinning&&this._stop(e)},100),e.preventDefault()}},"mousedown .ui-spinner-button":function(t){function i(){var e=this.element[0]===this.document[0].activeElement;e||(this.element.focus(),this.previous=s,this._delay(function(){this.previous=s}))}var s;s=this.element[0]===this.document[0].activeElement?this.previous:this.element.val(),t.preventDefault(),i.call(this),this.cancelBlur=!0,this._delay(function(){delete 
this.cancelBlur,i.call(this)}),this._start(t)!==!1&&this._repeat(null,e(t.currentTarget).hasClass("ui-spinner-up")?1:-1,t)},"mouseup .ui-spinner-button":"_stop","mouseenter .ui-spinner-button":function(t){return e(t.currentTarget).hasClass("ui-state-active")?this._start(t)===!1?!1:(this._repeat(null,e(t.currentTarget).hasClass("ui-spinner-up")?1:-1,t),void 0):void 0},"mouseleave .ui-spinner-button":"_stop"},_draw:function(){var e=this.uiSpinner=this.element.addClass("ui-spinner-input").attr("autocomplete","off").wrap(this._uiSpinnerHtml()).parent().append(this._buttonHtml());this.element.attr("role","spinbutton"),this.buttons=e.find(".ui-spinner-button").attr("tabIndex",-1).button().removeClass("ui-corner-all"),this.buttons.height()>Math.ceil(.5*e.height())&&e.height()>0&&e.height(e.height()),this.options.disabled&&this.disable()},_keydown:function(t){var i=this.options,s=e.ui.keyCode;switch(t.keyCode){case s.UP:return this._repeat(null,1,t),!0;case s.DOWN:return this._repeat(null,-1,t),!0;case s.PAGE_UP:return this._repeat(null,i.page,t),!0;case s.PAGE_DOWN:return this._repeat(null,-i.page,t),!0}return!1},_uiSpinnerHtml:function(){return""},_buttonHtml:function(){return""+""+""+""+""},_start:function(e){return this.spinning||this._trigger("start",e)!==!1?(this.counter||(this.counter=1),this.spinning=!0,!0):!1},_repeat:function(e,t,i){e=e||500,clearTimeout(this.timer),this.timer=this._delay(function(){this._repeat(40,t,i)},e),this._spin(t*this.options.step,i)},_spin:function(e,t){var i=this.value()||0;this.counter||(this.counter=1),i=this._adjustValue(i+e*this._increment(this.counter)),this.spinning&&this._trigger("spin",t,{value:i})===!1||(this._value(i),this.counter++)},_increment:function(t){var i=this.options.incremental;return i?e.isFunction(i)?i(t):Math.floor(t*t*t/5e4-t*t/500+17*t/200+1):1},_precision:function(){var e=this._precisionOf(this.options.step);return 
null!==this.options.min&&(e=Math.max(e,this._precisionOf(this.options.min))),e},_precisionOf:function(e){var t=""+e,i=t.indexOf(".");return-1===i?0:t.length-i-1},_adjustValue:function(e){var t,i,s=this.options;return t=null!==s.min?s.min:0,i=e-t,i=Math.round(i/s.step)*s.step,e=t+i,e=parseFloat(e.toFixed(this._precision())),null!==s.max&&e>s.max?s.max:null!==s.min&&s.min>e?s.min:e},_stop:function(e){this.spinning&&(clearTimeout(this.timer),clearTimeout(this.mousewheelTimer),this.counter=0,this.spinning=!1,this._trigger("stop",e))},_setOption:function(e,t){if("culture"===e||"numberFormat"===e){var i=this._parse(this.element.val());return this.options[e]=t,this.element.val(this._format(i)),void 0}("max"===e||"min"===e||"step"===e)&&"string"==typeof t&&(t=this._parse(t)),"icons"===e&&(this.buttons.first().find(".ui-icon").removeClass(this.options.icons.up).addClass(t.up),this.buttons.last().find(".ui-icon").removeClass(this.options.icons.down).addClass(t.down)),this._super(e,t),"disabled"===e&&(this.widget().toggleClass("ui-state-disabled",!!t),this.element.prop("disabled",!!t),this.buttons.button(t?"disable":"enable"))},_setOptions:h(function(e){this._super(e)}),_parse:function(e){return"string"==typeof e&&""!==e&&(e=window.Globalize&&this.options.numberFormat?Globalize.parseFloat(e,10,this.options.culture):+e),""===e||isNaN(e)?null:e},_format:function(e){return""===e?"":window.Globalize&&this.options.numberFormat?Globalize.format(e,this.options.numberFormat,this.options.culture):e},_refresh:function(){this.element.attr({"aria-valuemin":this.options.min,"aria-valuemax":this.options.max,"aria-valuenow":this._parse(this.element.val())})},isValid:function(){var e=this.value();return null===e?!1:e===this._adjustValue(e)},_value:function(e,t){var 
i;""!==e&&(i=this._parse(e),null!==i&&(t||(i=this._adjustValue(i)),e=this._format(i))),this.element.val(e),this._refresh()},_destroy:function(){this.element.removeClass("ui-spinner-input").prop("disabled",!1).removeAttr("autocomplete").removeAttr("role").removeAttr("aria-valuemin").removeAttr("aria-valuemax").removeAttr("aria-valuenow"),this.uiSpinner.replaceWith(this.element)},stepUp:h(function(e){this._stepUp(e)}),_stepUp:function(e){this._start()&&(this._spin((e||1)*this.options.step),this._stop())},stepDown:h(function(e){this._stepDown(e)}),_stepDown:function(e){this._start()&&(this._spin((e||1)*-this.options.step),this._stop())},pageUp:h(function(e){this._stepUp((e||1)*this.options.page)}),pageDown:h(function(e){this._stepDown((e||1)*this.options.page)}),value:function(e){return arguments.length?(h(this._value).call(this,e),void 0):this._parse(this.element.val())},widget:function(){return this.uiSpinner}}),e.widget("ui.tabs",{version:"1.11.4",delay:300,options:{active:null,collapsible:!1,event:"click",heightStyle:"content",hide:null,show:null,activate:null,beforeActivate:null,beforeLoad:null,load:null},_isLocal:function(){var e=/#.*$/;return function(t){var i,s;t=t.cloneNode(!1),i=t.href.replace(e,""),s=location.href.replace(e,"");try{i=decodeURIComponent(i)}catch(n){}try{s=decodeURIComponent(s)}catch(n){}return t.hash.length>1&&i===s}}(),_create:function(){var t=this,i=this.options;this.running=!1,this.element.addClass("ui-tabs ui-widget ui-widget-content ui-corner-all").toggleClass("ui-tabs-collapsible",i.collapsible),this._processTabs(),i.active=this._initialActive(),e.isArray(i.disabled)&&(i.disabled=e.unique(i.disabled.concat(e.map(this.tabs.filter(".ui-state-disabled"),function(e){return t.tabs.index(e)}))).sort()),this.active=this.options.active!==!1&&this.anchors.length?this._findActive(i.active):e(),this._refresh(),this.active.length&&this.load(i.active)},_initialActive:function(){var 
t=this.options.active,i=this.options.collapsible,s=location.hash.substring(1);return null===t&&(s&&this.tabs.each(function(i,n){return e(n).attr("aria-controls")===s?(t=i,!1):void 0}),null===t&&(t=this.tabs.index(this.tabs.filter(".ui-tabs-active"))),(null===t||-1===t)&&(t=this.tabs.length?0:!1)),t!==!1&&(t=this.tabs.index(this.tabs.eq(t)),-1===t&&(t=i?!1:0)),!i&&t===!1&&this.anchors.length&&(t=0),t},_getCreateEventData:function(){return{tab:this.active,panel:this.active.length?this._getPanelForTab(this.active):e()}},_tabKeydown:function(t){var i=e(this.document[0].activeElement).closest("li"),s=this.tabs.index(i),n=!0;if(!this._handlePageNav(t)){switch(t.keyCode){case e.ui.keyCode.RIGHT:case e.ui.keyCode.DOWN:s++;break;case e.ui.keyCode.UP:case e.ui.keyCode.LEFT:n=!1,s--;break;case e.ui.keyCode.END:s=this.anchors.length-1;break;case e.ui.keyCode.HOME:s=0;break;case e.ui.keyCode.SPACE:return t.preventDefault(),clearTimeout(this.activating),this._activate(s),void 0;case e.ui.keyCode.ENTER:return t.preventDefault(),clearTimeout(this.activating),this._activate(s===this.options.active?!1:s),void 0;default:return}t.preventDefault(),clearTimeout(this.activating),s=this._focusNextTab(s,n),t.ctrlKey||t.metaKey||(i.attr("aria-selected","false"),this.tabs.eq(s).attr("aria-selected","true"),this.activating=this._delay(function(){this.option("active",s)},this.delay))}},_panelKeydown:function(t){this._handlePageNav(t)||t.ctrlKey&&t.keyCode===e.ui.keyCode.UP&&(t.preventDefault(),this.active.focus())},_handlePageNav:function(t){return t.altKey&&t.keyCode===e.ui.keyCode.PAGE_UP?(this._activate(this._focusNextTab(this.options.active-1,!1)),!0):t.altKey&&t.keyCode===e.ui.keyCode.PAGE_DOWN?(this._activate(this._focusNextTab(this.options.active+1,!0)),!0):void 0},_findNextTab:function(t,i){function s(){return t>n&&(t=0),0>t&&(t=n),t}for(var n=this.tabs.length-1;-1!==e.inArray(s(),this.options.disabled);)t=i?t+1:t-1;return t},_focusNextTab:function(e,t){return 
e=this._findNextTab(e,t),this.tabs.eq(e).focus(),e},_setOption:function(e,t){return"active"===e?(this._activate(t),void 0):"disabled"===e?(this._setupDisabled(t),void 0):(this._super(e,t),"collapsible"===e&&(this.element.toggleClass("ui-tabs-collapsible",t),t||this.options.active!==!1||this._activate(0)),"event"===e&&this._setupEvents(t),"heightStyle"===e&&this._setupHeightStyle(t),void 0)},_sanitizeSelector:function(e){return e?e.replace(/[!"$%&'()*+,.\/:;<=>?@\[\]\^`{|}~]/g,"\\$&"):""},refresh:function(){var t=this.options,i=this.tablist.children(":has(a[href])");t.disabled=e.map(i.filter(".ui-state-disabled"),function(e){return i.index(e)}),this._processTabs(),t.active!==!1&&this.anchors.length?this.active.length&&!e.contains(this.tablist[0],this.active[0])?this.tabs.length===t.disabled.length?(t.active=!1,this.active=e()):this._activate(this._findNextTab(Math.max(0,t.active-1),!1)):t.active=this.tabs.index(this.active):(t.active=!1,this.active=e()),this._refresh()},_refresh:function(){this._setupDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-hidden":"true"}),this.active.length?(this.active.addClass("ui-tabs-active ui-state-active").attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}),this._getPanelForTab(this.active).show().attr({"aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var t=this,i=this.tabs,s=this.anchors,n=this.panels; +this.tablist=this._getList().addClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header ui-corner-all").attr("role","tablist").delegate("> 
li","mousedown"+this.eventNamespace,function(t){e(this).is(".ui-state-disabled")&&t.preventDefault()}).delegate(".ui-tabs-anchor","focus"+this.eventNamespace,function(){e(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this.tabs=this.tablist.find("> li:has(a[href])").addClass("ui-state-default ui-corner-top").attr({role:"tab",tabIndex:-1}),this.anchors=this.tabs.map(function(){return e("a",this)[0]}).addClass("ui-tabs-anchor").attr({role:"presentation",tabIndex:-1}),this.panels=e(),this.anchors.each(function(i,s){var n,a,o,r=e(s).uniqueId().attr("id"),h=e(s).closest("li"),l=h.attr("aria-controls");t._isLocal(s)?(n=s.hash,o=n.substring(1),a=t.element.find(t._sanitizeSelector(n))):(o=h.attr("aria-controls")||e({}).uniqueId()[0].id,n="#"+o,a=t.element.find(n),a.length||(a=t._createPanel(o),a.insertAfter(t.panels[i-1]||t.tablist)),a.attr("aria-live","polite")),a.length&&(t.panels=t.panels.add(a)),l&&h.data("ui-tabs-aria-controls",l),h.attr({"aria-controls":o,"aria-labelledby":r}),a.attr("aria-labelledby",r)}),this.panels.addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").attr("role","tabpanel"),i&&(this._off(i.not(this.tabs)),this._off(s.not(this.anchors)),this._off(n.not(this.panels)))},_getList:function(){return this.tablist||this.element.find("ol,ul").eq(0)},_createPanel:function(t){return e("
      ").attr("id",t).addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").data("ui-tabs-destroy",!0)},_setupDisabled:function(t){e.isArray(t)&&(t.length?t.length===this.anchors.length&&(t=!0):t=!1);for(var i,s=0;i=this.tabs[s];s++)t===!0||-1!==e.inArray(s,t)?e(i).addClass("ui-state-disabled").attr("aria-disabled","true"):e(i).removeClass("ui-state-disabled").removeAttr("aria-disabled");this.options.disabled=t},_setupEvents:function(t){var i={};t&&e.each(t.split(" "),function(e,t){i[t]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(!0,this.anchors,{click:function(e){e.preventDefault()}}),this._on(this.anchors,i),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(t){var i,s=this.element.parent();"fill"===t?(i=s.height(),i-=this.element.outerHeight()-this.element.height(),this.element.siblings(":visible").each(function(){var t=e(this),s=t.css("position");"absolute"!==s&&"fixed"!==s&&(i-=t.outerHeight(!0))}),this.element.children().not(this.panels).each(function(){i-=e(this).outerHeight(!0)}),this.panels.each(function(){e(this).height(Math.max(0,i-e(this).innerHeight()+e(this).height()))}).css("overflow","auto")):"auto"===t&&(i=0,this.panels.each(function(){i=Math.max(i,e(this).height("").height())}).height(i))},_eventHandler:function(t){var i=this.options,s=this.active,n=e(t.currentTarget),a=n.closest("li"),o=a[0]===s[0],r=o&&i.collapsible,h=r?e():this._getPanelForTab(a),l=s.length?this._getPanelForTab(s):e(),u={oldTab:s,oldPanel:l,newTab:r?e():a,newPanel:h};t.preventDefault(),a.hasClass("ui-state-disabled")||a.hasClass("ui-tabs-loading")||this.running||o&&!i.collapsible||this._trigger("beforeActivate",t,u)===!1||(i.active=r?!1:this.tabs.index(a),this.active=o?e():a,this.xhr&&this.xhr.abort(),l.length||h.length||e.error("jQuery UI Tabs: Mismatching fragment 
identifier."),h.length&&this.load(this.tabs.index(a),t),this._toggle(t,u))},_toggle:function(t,i){function s(){a.running=!1,a._trigger("activate",t,i)}function n(){i.newTab.closest("li").addClass("ui-tabs-active ui-state-active"),o.length&&a.options.show?a._show(o,a.options.show,s):(o.show(),s())}var a=this,o=i.newPanel,r=i.oldPanel;this.running=!0,r.length&&this.options.hide?this._hide(r,this.options.hide,function(){i.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),n()}):(i.oldTab.closest("li").removeClass("ui-tabs-active ui-state-active"),r.hide(),n()),r.attr("aria-hidden","true"),i.oldTab.attr({"aria-selected":"false","aria-expanded":"false"}),o.length&&r.length?i.oldTab.attr("tabIndex",-1):o.length&&this.tabs.filter(function(){return 0===e(this).attr("tabIndex")}).attr("tabIndex",-1),o.attr("aria-hidden","false"),i.newTab.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_activate:function(t){var i,s=this._findActive(t);s[0]!==this.active[0]&&(s.length||(s=this.active),i=s.find(".ui-tabs-anchor")[0],this._eventHandler({target:i,currentTarget:i,preventDefault:e.noop}))},_findActive:function(t){return t===!1?e():this.tabs.eq(t)},_getIndex:function(e){return"string"==typeof e&&(e=this.anchors.index(this.anchors.filter("[href$='"+e+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.element.removeClass("ui-tabs ui-widget ui-widget-content ui-corner-all ui-tabs-collapsible"),this.tablist.removeClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header ui-corner-all").removeAttr("role"),this.anchors.removeClass("ui-tabs-anchor").removeAttr("role").removeAttr("tabIndex").removeUniqueId(),this.tablist.unbind(this.eventNamespace),this.tabs.add(this.panels).each(function(){e.data(this,"ui-tabs-destroy")?e(this).remove():e(this).removeClass("ui-state-default ui-state-active ui-state-disabled ui-corner-top ui-corner-bottom ui-widget-content ui-tabs-active 
ui-tabs-panel").removeAttr("tabIndex").removeAttr("aria-live").removeAttr("aria-busy").removeAttr("aria-selected").removeAttr("aria-labelledby").removeAttr("aria-hidden").removeAttr("aria-expanded").removeAttr("role")}),this.tabs.each(function(){var t=e(this),i=t.data("ui-tabs-aria-controls");i?t.attr("aria-controls",i).removeData("ui-tabs-aria-controls"):t.removeAttr("aria-controls")}),this.panels.show(),"content"!==this.options.heightStyle&&this.panels.css("height","")},enable:function(t){var i=this.options.disabled;i!==!1&&(void 0===t?i=!1:(t=this._getIndex(t),i=e.isArray(i)?e.map(i,function(e){return e!==t?e:null}):e.map(this.tabs,function(e,i){return i!==t?i:null})),this._setupDisabled(i))},disable:function(t){var i=this.options.disabled;if(i!==!0){if(void 0===t)i=!0;else{if(t=this._getIndex(t),-1!==e.inArray(t,i))return;i=e.isArray(i)?e.merge([t],i).sort():[t]}this._setupDisabled(i)}},load:function(t,i){t=this._getIndex(t);var s=this,n=this.tabs.eq(t),a=n.find(".ui-tabs-anchor"),o=this._getPanelForTab(n),r={tab:n,panel:o},h=function(e,t){"abort"===t&&s.panels.stop(!1,!0),n.removeClass("ui-tabs-loading"),o.removeAttr("aria-busy"),e===s.xhr&&delete s.xhr};this._isLocal(a[0])||(this.xhr=e.ajax(this._ajaxSettings(a,i,r)),this.xhr&&"canceled"!==this.xhr.statusText&&(n.addClass("ui-tabs-loading"),o.attr("aria-busy","true"),this.xhr.done(function(e,t,n){setTimeout(function(){o.html(e),s._trigger("load",i,r),h(n,t)},1)}).fail(function(e,t){setTimeout(function(){h(e,t)},1)})))},_ajaxSettings:function(t,i,s){var n=this;return{url:t.attr("href"),beforeSend:function(t,a){return n._trigger("beforeLoad",i,e.extend({jqXHR:t,ajaxSettings:a},s))}}},_getPanelForTab:function(t){var i=e(t).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+i))}}),e.widget("ui.tooltip",{version:"1.11.4",options:{content:function(){var t=e(this).attr("title")||"";return e("").text(t).html()},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left 
bottom",collision:"flipfit flip"},show:!0,tooltipClass:null,track:!1,close:null,open:null},_addDescribedBy:function(t,i){var s=(t.attr("aria-describedby")||"").split(/\s+/);s.push(i),t.data("ui-tooltip-id",i).attr("aria-describedby",e.trim(s.join(" ")))},_removeDescribedBy:function(t){var i=t.data("ui-tooltip-id"),s=(t.attr("aria-describedby")||"").split(/\s+/),n=e.inArray(i,s);-1!==n&&s.splice(n,1),t.removeData("ui-tooltip-id"),s=e.trim(s.join(" ")),s?t.attr("aria-describedby",s):t.removeAttr("aria-describedby")},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.options.disabled&&this._disable(),this.liveRegion=e("
      ").attr({role:"log","aria-live":"assertive","aria-relevant":"additions"}).addClass("ui-helper-hidden-accessible").appendTo(this.document[0].body)},_setOption:function(t,i){var s=this;return"disabled"===t?(this[i?"_disable":"_enable"](),this.options[t]=i,void 0):(this._super(t,i),"content"===t&&e.each(this.tooltips,function(e,t){s._updateContent(t.element)}),void 0)},_disable:function(){var t=this;e.each(this.tooltips,function(i,s){var n=e.Event("blur");n.target=n.currentTarget=s.element[0],t.close(n,!0)}),this.element.find(this.options.items).addBack().each(function(){var t=e(this);t.is("[title]")&&t.data("ui-tooltip-title",t.attr("title")).removeAttr("title")})},_enable:function(){this.element.find(this.options.items).addBack().each(function(){var t=e(this);t.data("ui-tooltip-title")&&t.attr("title",t.data("ui-tooltip-title"))})},open:function(t){var i=this,s=e(t?t.target:this.element).closest(this.options.items);s.length&&!s.data("ui-tooltip-id")&&(s.attr("title")&&s.data("ui-tooltip-title",s.attr("title")),s.data("ui-tooltip-open",!0),t&&"mouseover"===t.type&&s.parents().each(function(){var t,s=e(this);s.data("ui-tooltip-open")&&(t=e.Event("blur"),t.target=t.currentTarget=this,i.close(t,!0)),s.attr("title")&&(s.uniqueId(),i.parents[this.id]={element:this,title:s.attr("title")},s.attr("title",""))}),this._registerCloseHandlers(t,s),this._updateContent(s,t))},_updateContent:function(e,t){var i,s=this.options.content,n=this,a=t?t.type:null;return"string"==typeof s?this._open(t,e,s):(i=s.call(e[0],function(i){n._delay(function(){e.data("ui-tooltip-open")&&(t&&(t.type=a),this._open(t,e,i))})}),i&&this._open(t,e,i),void 0)},_open:function(t,i,s){function n(e){l.of=e,o.is(":hidden")||o.position(l)}var a,o,r,h,l=e.extend({},this.options.position);if(s){if(a=this._find(i))return a.tooltip.find(".ui-tooltip-content").html(s),void 
0;i.is("[title]")&&(t&&"mouseover"===t.type?i.attr("title",""):i.removeAttr("title")),a=this._tooltip(i),o=a.tooltip,this._addDescribedBy(i,o.attr("id")),o.find(".ui-tooltip-content").html(s),this.liveRegion.children().hide(),s.clone?(h=s.clone(),h.removeAttr("id").find("[id]").removeAttr("id")):h=s,e("
      ").html(h).appendTo(this.liveRegion),this.options.track&&t&&/^mouse/.test(t.type)?(this._on(this.document,{mousemove:n}),n(t)):o.position(e.extend({of:i},this.options.position)),o.hide(),this._show(o,this.options.show),this.options.show&&this.options.show.delay&&(r=this.delayedShow=setInterval(function(){o.is(":visible")&&(n(l.of),clearInterval(r))},e.fx.interval)),this._trigger("open",t,{tooltip:o})}},_registerCloseHandlers:function(t,i){var s={keyup:function(t){if(t.keyCode===e.ui.keyCode.ESCAPE){var s=e.Event(t);s.currentTarget=i[0],this.close(s,!0)}}};i[0]!==this.element[0]&&(s.remove=function(){this._removeTooltip(this._find(i).tooltip)}),t&&"mouseover"!==t.type||(s.mouseleave="close"),t&&"focusin"!==t.type||(s.focusout="close"),this._on(!0,i,s)},close:function(t){var i,s=this,n=e(t?t.currentTarget:this.element),a=this._find(n);return a?(i=a.tooltip,a.closing||(clearInterval(this.delayedShow),n.data("ui-tooltip-title")&&!n.attr("title")&&n.attr("title",n.data("ui-tooltip-title")),this._removeDescribedBy(n),a.hiding=!0,i.stop(!0),this._hide(i,this.options.hide,function(){s._removeTooltip(e(this))}),n.removeData("ui-tooltip-open"),this._off(n,"mouseleave focusout keyup"),n[0]!==this.element[0]&&this._off(n,"remove"),this._off(this.document,"mousemove"),t&&"mouseleave"===t.type&&e.each(this.parents,function(t,i){e(i.element).attr("title",i.title),delete s.parents[t]}),a.closing=!0,this._trigger("close",t,{tooltip:i}),a.hiding||(a.closing=!1)),void 0):(n.removeData("ui-tooltip-open"),void 0)},_tooltip:function(t){var i=e("
      ").attr("role","tooltip").addClass("ui-tooltip ui-widget ui-corner-all ui-widget-content "+(this.options.tooltipClass||"")),s=i.uniqueId().attr("id");return e("
      ").addClass("ui-tooltip-content").appendTo(i),i.appendTo(this.document[0].body),this.tooltips[s]={element:t,tooltip:i}},_find:function(e){var t=e.data("ui-tooltip-id");return t?this.tooltips[t]:null},_removeTooltip:function(e){e.remove(),delete this.tooltips[e.attr("id")]},_destroy:function(){var t=this;e.each(this.tooltips,function(i,s){var n=e.Event("blur"),a=s.element;n.target=n.currentTarget=a[0],t.close(n,!0),e("#"+i).remove(),a.data("ui-tooltip-title")&&(a.attr("title")||a.attr("title",a.data("ui-tooltip-title")),a.removeData("ui-tooltip-title"))}),this.liveRegion.remove()}})}); \ No newline at end of file diff --git a/static/js/reader/epub.min.js b/static/js/reader/epub.min.js deleted file mode 100644 index fff280087..000000000 --- a/static/js/reader/epub.min.js +++ /dev/null @@ -1,8 +0,0 @@ -/*! - * @overview RSVP - a tiny implementation of Promises/A+. - * @copyright Copyright (c) 2016 Yehuda Katz, Tom Dale, Stefan Penner and contributors - * @license Licensed under MIT license - * See https://raw.githubusercontent.com/tildeio/rsvp.js/master/LICENSE - * @version 3.5.0 - */ -"use strict";!function(a,b){"object"==typeof exports&&"undefined"!=typeof module?b(exports):"function"==typeof define&&define.amd?define(["exports"],b):b(a.RSVP=a.RSVP||{})}(this,function(a){function b(a,b){for(var c=0,d=a.length;c1)throw new Error("Second argument not supported");if("object"!=typeof a)throw new TypeError("Argument must be an object");return h.prototype=a,new h},Aa=[],Ba=void 0,Ca=1,Da=2,Ea=new y,Fa=new y;E.prototype._validateInput=function(a){return xa(a)},E.prototype._validationError=function(){return new Error("Array Methods must be provided an Array")},E.prototype._init=function(){this._result=new Array(this.length)},E.prototype._enumerate=function(){for(var a=this.length,b=this.promise,c=this._input,d=0;b._state===Ba&&d=i)l.resolve();else{if(c&&c.cancelled)return e.remove(),this.element.removeChild(f),void l.reject(new Error("User cancelled"));h=g,b=new 
EPUBJS.Chapter(this.spine[h],this.store),e.displayChapter(b,this.globalLayoutProperties).then(function(a){e.pageMap.forEach(function(a){j+=1,d.push({cfi:a.start,page:j})}),e.pageMap.length%2>0&&e.spreads&&(j+=1,d.push({cfi:e.pageMap[e.pageMap.length-1].end,page:j})),setTimeout(function(){k(l)},1)})}return l.promise}.bind(this);k().then(function(){e.remove(),this.element.removeChild(f),g.resolve(d)}.bind(this),function(a){g.reject(a)});return g.promise},EPUBJS.Book.prototype.generatePagination=function(a,b,c){var d=this,e=new RSVP.defer;return this.ready.spine.promise.then(function(){d.generatePageList(a,b,c).then(function(a){d.pageList=d.contents.pageList=a,d.pagination.process(a),d.ready.pageList.resolve(d.pageList),e.resolve(d.pageList)},function(a){e.reject(a)})}),e.promise},EPUBJS.Book.prototype.loadPagination=function(a){var b;return b="string"==typeof a?JSON.parse(a):a,b&&b.length&&(this.pageList=b,this.pagination.process(this.pageList),this.ready.pageList.resolve(this.pageList)),this.pageList},EPUBJS.Book.prototype.getPageList=function(){return this.ready.pageList.promise},EPUBJS.Book.prototype.getMetadata=function(){return this.ready.metadata.promise},EPUBJS.Book.prototype.getToc=function(){return this.ready.toc.promise},EPUBJS.Book.prototype.networkListeners=function(){var a=this;window.addEventListener("offline",function(b){a.online=!1,a.settings.storage&&a.fromStorage(!0),a.trigger("book:offline")},!1),window.addEventListener("online",function(b){a.online=!0,a.settings.storage&&a.fromStorage(!1),a.trigger("book:online")},!1)},EPUBJS.Book.prototype.listenToRenderer=function(a){var b=this;a.Events.forEach(function(c){a.on(c,function(a){b.trigger(c,a)})}),a.on("renderer:visibleRangeChanged",function(a){var 
b,c,d,e=[];this.pageList.length>0&&(b=this.pagination.pageFromCfi(a.start),d=this.pagination.percentageFromPage(b),e.push(b),a.end&&(c=this.pagination.pageFromCfi(a.end),e.push(c)),this.trigger("book:pageChanged",{anchorPage:b,percentage:d,pageRange:e}))}.bind(this)),a.on("render:loaded",this.loadChange.bind(this))},EPUBJS.Book.prototype.loadChange=function(a){var b,c,d=EPUBJS.core.uri(a),e=EPUBJS.core.uri(this.currentChapter.absolute);d.path!=e.path?(console.warn("Miss Match",d.path,this.currentChapter.absolute),b=this.spineIndexByURL[d.filename],c=new EPUBJS.Chapter(this.spine[b],this.store),this.currentChapter=c,this.renderer.currentChapter=c,this.renderer.afterLoad(this.renderer.render.docEl),this.renderer.beforeDisplay(function(){this.renderer.afterDisplay()}.bind(this))):this._rendering||this.renderer.reformat()},EPUBJS.Book.prototype.unlistenToRenderer=function(a){a.Events.forEach(function(b){a.off(b)})},EPUBJS.Book.prototype.coverUrl=function(){var a=this.ready.cover.promise.then(function(a){return this.settings.fromStorage?this.store.getUrl(this.contents.cover):this.settings.contained?this.zip.getUrl(this.contents.cover):this.contents.cover}.bind(this));return a.then(function(a){this.cover=a}.bind(this)),a},EPUBJS.Book.prototype.loadXml=function(a){return this.settings.fromStorage?this.store.getXml(a,this.settings.encoding):this.settings.contained?this.zip.getXml(a,this.settings.encoding):EPUBJS.core.request(a,"xml",this.settings.withCredentials)},EPUBJS.Book.prototype.urlFrom=function(a){var b,c=EPUBJS.core.uri(a),d=c.protocol,e="/"==c.path[0],f=window.location,g=f.origin||f.protocol+"//"+f.host,h=document.getElementsByTagName("base");return h.length&&(b=h[0].href),c.protocol?c.origin+c.path:!d&&e?(b||g)+c.path:d||e?void 0:EPUBJS.core.resolveUrl(b||f.pathname,c.path)},EPUBJS.Book.prototype.unarchive=function(a){return this.zip=new EPUBJS.Unarchiver,this.store=this.zip,this.zip.open(a)},EPUBJS.Book.prototype.isContained=function(a){if(a instanceof 
ArrayBuffer)return!0;var b=EPUBJS.core.uri(a);return!(!b.extension||"epub"!=b.extension&&"zip"!=b.extension)},EPUBJS.Book.prototype.isSaved=function(a){var b;return!!localStorage&&(b=localStorage.getItem(a),!(!localStorage||null===b))},EPUBJS.Book.prototype.generateBookKey=function(a){return"epubjs:"+EPUBJS.VERSION+":"+window.location.host+":"+a},EPUBJS.Book.prototype.saveContents=function(){if(!localStorage)return!1;localStorage.setItem(this.settings.bookKey,JSON.stringify(this.contents))},EPUBJS.Book.prototype.removeSavedContents=function(){if(!localStorage)return!1;localStorage.removeItem(this.settings.bookKey)},EPUBJS.Book.prototype.renderTo=function(a){var b=this;if(EPUBJS.core.isElement(a))this.element=a;else{if("string"!=typeof a)return void console.error("Not an Element");this.element=EPUBJS.core.getEl(a)}return this.opened.then(function(){return b.renderer.initialize(b.element,b.settings.width,b.settings.height),b.metadata.direction&&b.renderer.setDirection(b.metadata.direction),b._rendered(),b.startDisplay()})},EPUBJS.Book.prototype.startDisplay=function(){return this.settings.goto?this.goto(this.settings.goto):this.settings.previousLocationCfi?this.gotoCfi(this.settings.previousLocationCfi):this.displayChapter(this.spinePos,this.settings.displayLastPage)},EPUBJS.Book.prototype.restore=function(a){var b,c=this,d=["manifest","spine","metadata","cover","toc","spineNodeIndex","spineIndexByURL","globalLayoutProperties"],e=!1,f=this.generateBookKey(a),g=localStorage.getItem(f),h=d.length;if(this.settings.clearSaved&&(e=!0),!e&&"undefined"!=g&&null!==g)for(c.contents=JSON.parse(g),b=0;b=this.spine.length)&&(console.warn("Not A Valid Location"),f=0,b=!1,e=!1),g=new 
EPUBJS.Chapter(this.spine[f],this.store),this._rendering=!0,this._needsAssetReplacement()&&g.registerHook("beforeChapterRender",[EPUBJS.replace.head,EPUBJS.replace.resources,EPUBJS.replace.posters,EPUBJS.replace.svg],!0),h.currentChapter=g,d=h.renderer.displayChapter(g,this.globalLayoutProperties),e?h.renderer.gotoCfi(e):b&&h.renderer.lastPage(),d.then(function(a){h.spinePos=f,i.resolve(h.renderer),h.settings.fromStorage===!1&&h.settings.contained===!1&&h.preloadNextChapter(),h._rendering=!1,h._displayQ.dequeue(),0===h._displayQ.length()&&h._gotoQ.dequeue()},function(a){console.error("Could not load Chapter: "+g.absolute,a),h.trigger("book:chapterLoadFailed",g.absolute),h._rendering=!1,i.reject(a)}),i.promise):(this._q.enqueue("displayChapter",arguments),i.reject({message:"Rendering",stack:(new Error).stack}),i.promise)},EPUBJS.Book.prototype.nextPage=function(a){var a=a||new RSVP.defer;return this.isRendered?this.renderer.nextPage()?(a.resolve(!0),a.promise):this.nextChapter(a):(this._q.enqueue("nextPage",[a]),a.promise)},EPUBJS.Book.prototype.prevPage=function(a){var a=a||new RSVP.defer;return this.isRendered?this.renderer.prevPage()?(a.resolve(!0),a.promise):this.prevChapter(a):(this._q.enqueue("prevPage",[a]),a.promise)},EPUBJS.Book.prototype.nextChapter=function(a){var a=a||new RSVP.defer;if(this.spinePos0){for(var b=this.spinePos-1;this.spine[b]&&this.spine[b].linear&&"no"==this.spine[b].linear;)b--;if(b>=0)return this.displayChapter(b,!0,a)}return this.trigger("book:atStart"),a.resolve(!0),a.promise},EPUBJS.Book.prototype.getCurrentLocationCfi=function(){return!!this.isRendered&&this.renderer.currentLocationCfi},EPUBJS.Book.prototype.goto=function(a){return 0===a.indexOf("epubcfi(")?this.gotoCfi(a):a.indexOf("%")===a.length-1?this.gotoPercentage(parseInt(a.substring(0,a.length-1))/100):"number"==typeof a||isNaN(a)===!1?this.gotoPage(a):this.gotoHref(a)},EPUBJS.Book.prototype.gotoCfi=function(a,b){var c,d,e,f,g,h=b||new RSVP.defer;return 
this.isRendered?this._moving||this._rendering?(console.warn("Renderer is moving"),this._gotoQ.enqueue("gotoCfi",[a,h]),!1):(c=new EPUBJS.EpubCFI(a),(d=c.spinePos)!=-1&&(e=this.spine[d],f=h.promise,this._moving=!0,this.currentChapter&&this.spinePos===d?(this.renderer.gotoCfi(c),this._moving=!1,h.resolve(this.renderer.currentLocationCfi)):(e&&d!=-1||(d=0,e=this.spine[d]),g=this.displayChapter(a),g.then(function(a){this._moving=!1,h.resolve(a.currentLocationCfi)}.bind(this),function(){this._moving=!1}.bind(this))),f.then(function(){this._gotoQ.dequeue()}.bind(this)),f)):(console.warn("Not yet Rendered"),this.settings.previousLocationCfi=a,!1)},EPUBJS.Book.prototype.gotoHref=function(a,b){var c,d,e,f,g,h=b||new RSVP.defer;return this.isRendered?this._moving||this._rendering?(this._gotoQ.enqueue("gotoHref",[a,h]),!1):(c=a.split("#"),d=c[0],e=c[1]||!1,f=d.search("://")==-1?d.replace(EPUBJS.core.uri(this.settings.contentsPath).path,""):d.replace(this.settings.contentsPath,""),g=this.spineIndexByURL[f],d||(g=this.currentChapter?this.currentChapter.spinePos:0),"number"==typeof g&&(this.currentChapter&&g==this.currentChapter.spinePos?(e?this.renderer.section(e):this.renderer.firstPage(),h.resolve(this.renderer.currentLocationCfi),h.promise.then(function(){this._gotoQ.dequeue()}.bind(this)),h.promise):this.displayChapter(g).then(function(){e&&this.renderer.section(e),h.resolve(this.renderer.currentLocationCfi)}.bind(this)))):(this.settings.goto=a,!1)},EPUBJS.Book.prototype.gotoPage=function(a){var b=this.pagination.cfiFromPage(a);return this.gotoCfi(b)},EPUBJS.Book.prototype.gotoPercentage=function(a){var b=this.pagination.pageFromPercentage(a);return this.gotoPage(b)},EPUBJS.Book.prototype.preloadNextChapter=function(){var a,b=this.spinePos+1;if(b>=this.spine.length)return!1;(a=new EPUBJS.Chapter(this.spine[b]))&&EPUBJS.core.request(a.absolute)},EPUBJS.Book.prototype.storeOffline=function(){var a=this,b=EPUBJS.core.values(this.manifest);return 
this.store.put(b).then(function(){a.settings.stored=!0,a.trigger("book:stored")})},EPUBJS.Book.prototype.availableOffline=function(){return this.settings.stored>0},EPUBJS.Book.prototype.toStorage=function(){var a=this.settings.bookKey;this.store.isStored(a).then(function(b){return b===!0?(this.settings.stored=!0,!0):this.storeOffline().then(function(){this.store.token(a,!0)}.bind(this))}.bind(this))},EPUBJS.Book.prototype.fromStorage=function(a){EPUBJS.replace.head,EPUBJS.replace.resources,EPUBJS.replace.posters,EPUBJS.replace.svg;this.contained||this.settings.contained||(this.online&&this.opened.then(this.toStorage.bind(this)),this.store&&this.settings.fromStorage&&a===!1?(this.settings.fromStorage=!1,this.store.off("offline"),this.store=!1):this.settings.fromStorage||(this.store=new EPUBJS.Storage(this.settings.credentials),this.store.on("offline",function(a){a?(this.offline=!0,this.settings.fromStorage=!0,this.trigger("book:offline")):(this.offline=!1,this.settings.fromStorage=!1,this.trigger("book:online"))}.bind(this))))},EPUBJS.Book.prototype.setStyle=function(a,b,c){var d=["color","background","background-color"];if(!this.isRendered)return this._q.enqueue("setStyle",arguments);this.settings.styles[a]=b,this.renderer.setStyle(a,b,c),d.indexOf(a)===-1&&this.renderer.reformat()},EPUBJS.Book.prototype.removeStyle=function(a){if(!this.isRendered)return this._q.enqueue("removeStyle",arguments);this.renderer.removeStyle(a),this.renderer.reformat(),delete this.settings.styles[a]},EPUBJS.Book.prototype.resetClasses=function(a){if(!this.isRendered)return this._q.enqueue("setClasses",arguments);a.constructor===String&&(a=[a]),this.settings.classes=a,this.renderer.setClasses(this.settings.classes),this.renderer.reformat()},EPUBJS.Book.prototype.addClass=function(a){if(!this.isRendered)return 
this._q.enqueue("addClass",arguments);this.settings.classes.indexOf(a)==-1&&this.settings.classes.push(a),this.renderer.setClasses(this.settings.classes),this.renderer.reformat()},EPUBJS.Book.prototype.removeClass=function(a){if(!this.isRendered)return this._q.enqueue("removeClass",arguments);var b=this.settings.classes.indexOf(a);b!=-1&&(delete this.settings.classes[b],this.renderer.setClasses(this.settings.classes),this.renderer.reformat())},EPUBJS.Book.prototype.addHeadTag=function(a,b){if(!this.isRendered)return this._q.enqueue("addHeadTag",arguments);this.settings.headTags[a]=b},EPUBJS.Book.prototype.useSpreads=function(a){console.warn("useSpreads is deprecated, use forceSingle or set a layoutOveride instead"),a===!1?this.forceSingle(!0):this.forceSingle(!1)},EPUBJS.Book.prototype.forceSingle=function(a){var b=void 0===a||a;this.renderer.forceSingle(b),this.settings.forceSingle=b,this.isRendered&&this.renderer.reformat()},EPUBJS.Book.prototype.setMinSpreadWidth=function(a){this.settings.minSpreadWidth=a,this.isRendered&&(this.renderer.setMinSpreadWidth(this.settings.minSpreadWidth),this.renderer.reformat())},EPUBJS.Book.prototype.setGap=function(a){this.settings.gap=a,this.isRendered&&(this.renderer.setGap(this.settings.gap),this.renderer.reformat())},EPUBJS.Book.prototype.chapter=function(a){var b,c,d=this.spineIndexByURL[a];return d&&(b=this.spine[d],c=new 
EPUBJS.Chapter(b,this.store,this.settings.withCredentials),c.load()),c},EPUBJS.Book.prototype.unload=function(){this.settings.restore&&localStorage&&this.saveContents(),this.unlistenToRenderer(this.renderer),this.trigger("book:unload")},EPUBJS.Book.prototype.destroy=function(){window.removeEventListener("beforeunload",this.unload),this.currentChapter&&this.currentChapter.unload(),this.unload(),this.renderer&&this.renderer.remove()},EPUBJS.Book.prototype._ready=function(){this.trigger("book:ready")},EPUBJS.Book.prototype._rendered=function(a){this.isRendered=!0,this.trigger("book:rendered"),this._q.flush()},EPUBJS.Book.prototype.applyStyles=function(a,b){a.applyStyles(this.settings.styles),b()},EPUBJS.Book.prototype.applyClasses=function(a,b){a.setClasses(this.settings.classes),b()},EPUBJS.Book.prototype.applyHeadTags=function(a,b){a.applyHeadTags(this.settings.headTags),b()},EPUBJS.Book.prototype._registerReplacements=function(a){a.registerHook("beforeChapterDisplay",this.applyStyles.bind(this,a),!0),a.registerHook("beforeChapterDisplay",this.applyHeadTags.bind(this,a),!0),a.registerHook("beforeChapterDisplay",this.applyClasses.bind(this,a),!0),a.registerHook("beforeChapterDisplay",EPUBJS.replace.hrefs.bind(this),!0)},EPUBJS.Book.prototype._needsAssetReplacement=function(){return!!this.settings.fromStorage||!!this.settings.contained},EPUBJS.Book.prototype.parseLayoutProperties=function(a){return{layout:this.settings.layoutOveride&&this.settings.layoutOveride.layout||a.layout||"reflowable",spread:this.settings.layoutOveride&&this.settings.layoutOveride.spread||a.spread||"auto",orientation:this.settings.layoutOveride&&this.settings.layoutOveride.orientation||a.orientation||"auto"}},RSVP.EventTarget.mixin(EPUBJS.Book.prototype),RSVP.on("error",function(a){console.error(a)}),RSVP.configure("instrument",!0),EPUBJS.Chapter=function(a,b,c){this.href=a.href,this.absolute=a.url,this.id=a.id,this.spinePos=a.index,this.cfiBase=a.cfiBase,this.properties=a.properties,this.manife
stProperties=a.manifestProperties,this.linear=a.linear,this.pages=1,this.store=b,this.credentials=c,this.epubcfi=new EPUBJS.EpubCFI,this.deferred=new RSVP.defer,this.loaded=this.deferred.promise,EPUBJS.Hooks.mixin(this),this.getHooks("beforeChapterRender"),this.caches={}},EPUBJS.Chapter.prototype.load=function(a,b){var c,d=a||this.store,e=b||this.credentials;return c=d?d.getXml(this.absolute):EPUBJS.core.request(this.absolute,!1,e),c.then(function(a){try{this.setDocument(a),this.deferred.resolve(this)}catch(a){this.deferred.reject({message:this.absolute+" -> "+a.message,stack:(new Error).stack})}}.bind(this)),c},EPUBJS.Chapter.prototype.render=function(a){return this.load().then(function(a){var b=a.querySelector("head"),c=a.createElement("base");return c.setAttribute("href",this.absolute),b.insertBefore(c,b.firstChild),this.contents=a,new RSVP.Promise(function(b,c){this.triggerHooks("beforeChapterRender",function(){b(a)}.bind(this),this)}.bind(this))}.bind(this)).then(function(a){return(new XMLSerializer).serializeToString(a)}.bind(this))},EPUBJS.Chapter.prototype.url=function(a){var b,c=new RSVP.defer,d=a||this.store,e=this;return d?this.tempUrl?(b=this.tempUrl,c.resolve(b)):d.getUrl(this.absolute).then(function(a){e.tempUrl=a,c.resolve(a)}):(b=this.absolute,c.resolve(b)),c.promise},EPUBJS.Chapter.prototype.setPages=function(a){this.pages=a},EPUBJS.Chapter.prototype.getPages=function(a){return this.pages},EPUBJS.Chapter.prototype.getID=function(){return this.ID},EPUBJS.Chapter.prototype.unload=function(a){this.document=null,this.tempUrl&&a&&(a.revokeUrl(this.tempUrl),this.tempUrl=!1)},EPUBJS.Chapter.prototype.setDocument=function(a){this.document=a,this.contents=a.documentElement,!this.document.evaluate&&document.evaluate&&(this.document.evaluate=document.evaluate)},EPUBJS.Chapter.prototype.cfiFromRange=function(a){var b,c,d,e,f,g,h;if(this.document){if(void 
0!==document.evaluate){if(c=EPUBJS.core.getElementXPath(a.startContainer),d=EPUBJS.core.getElementXPath(a.endContainer),e=this.document.evaluate(c,this.document,EPUBJS.core.nsResolver,XPathResult.FIRST_ORDERED_NODE_TYPE,null).singleNodeValue,a.collapsed||(f=this.document.evaluate(d,this.document,EPUBJS.core.nsResolver,XPathResult.FIRST_ORDERED_NODE_TYPE,null).singleNodeValue),b=this.document.createRange(),e)try{b.setStart(e,a.startOffset),!a.collapsed&&f&&b.setEnd(f,a.endOffset)}catch(a){console.log("missed"),e=!1}e||(console.log("not found, try fuzzy match"),g=EPUBJS.core.cleanStringForXpath(a.startContainer.textContent),c="//text()[contains(.,"+g+")]",(e=this.document.evaluate(c,this.document,EPUBJS.core.nsResolver,XPathResult.FIRST_ORDERED_NODE_TYPE,null).singleNodeValue)&&(b.setStart(e,a.startOffset),a.collapsed||(h=EPUBJS.core.cleanStringForXpath(a.endContainer.textContent),d="//text()[contains(.,"+h+")]",(f=this.document.evaluate(d,this.document,EPUBJS.core.nsResolver,XPathResult.FIRST_ORDERED_NODE_TYPE,null).singleNodeValue)&&b.setEnd(f,a.endOffset))))}else b=a;return this.epubcfi.generateCfiFromRange(b,this.cfiBase)}},EPUBJS.Chapter.prototype.find=function(a){var b=this,c=[],d=a.toLowerCase(),e=function(a){for(var e,f,g,h=a.textContent.toLowerCase(),i=b.document.createRange(),j=-1,k=150;f!=-1;)f=h.indexOf(d,j+1),f!=-1&&(i=b.document.createRange(),i.setStart(a,f),i.setEnd(a,f+d.length),e=b.cfiFromRange(i),a.textContent.lengthb?1:a0?i:i+1:0===f?i:f===-1?EPUBJS.core.locationOf(a,b,c,i,h):EPUBJS.core.locationOf(a,b,c,g,i))},EPUBJS.core.indexOfSorted=function(a,b,c,d,e){var f,g=d||0,h=e||b.length,i=parseInt(g+(h-g)/2);return c||(c=function(a,b){return a>b?1:a0;){if(c=d.shift(),"text"===c.type?(e=g.childNodes[c.index],g=e.parentNode||g):g=c.id?f.getElementById(c.id):h[c.index],!g||void 0===g)return console.error("No Element For",c,a.str),!1;h=Array.prototype.slice.call(g.children)}return g},EPUBJS.EpubCFI.prototype.compare=function(a,b){if("string"==typeof 
a&&(a=new EPUBJS.EpubCFI(a)),"string"==typeof b&&(b=new EPUBJS.EpubCFI(b)),a.spinePos>b.spinePos)return 1;if(a.spinePosb.steps[c].index)return 1;if(a.steps[c].indexb.characterOffset?1:a.characterOffset")},EPUBJS.EpubCFI.prototype.generateRangeFromCfi=function(a,b){var c,d,e,f,g,h,i=b||document,j=i.createRange();return"string"==typeof a&&(a=this.parse(a)),a.spinePos!==-1&&(c=a.steps[a.steps.length-1],void 0!==document.evaluate?(d=this.generateXpathFromSteps(a.steps),e=i.evaluate(d,i,null,XPathResult.FIRST_ORDERED_NODE_TYPE,null).singleNodeValue):(g=this.generateQueryFromSteps(a.steps),(h=i.querySelector(g))&&"text"==c.type&&(e=h.childNodes[c.index])),e?(e&&a.characterOffset>=0?(f=e.length,a.characterOffset-1&&this.hooks[a].splice(c,1):Array.isArray(b)&&b.forEach(function(b){(c=this.hooks[a].indexOf(b))>-1&&this.hooks[a].splice(c,1)},this))},a.prototype.triggerHooks=function(a,b,c){function d(){--f<=0&&b&&b()}var e,f;if(void 0===this.hooks[a])return!1;e=this.hooks[a],f=e.length,0===f&&b&&b(),e.forEach(function(a){a(d,c)})},{register:function(a){if(void 0===EPUBJS.hooks[a]&&(EPUBJS.hooks[a]={}),"object"!=typeof EPUBJS.hooks[a])throw"Already registered: "+a;return EPUBJS.hooks[a]},mixin:function(b){for(var c in a.prototype)b[c]=a.prototype[c]}}}(),EPUBJS.Layout=EPUBJS.Layout||{},EPUBJS.Layout.isFixedLayout=function(a){var b=a.querySelector("[name=viewport]");if(!b||!b.hasAttribute("content"))return!1;var c=b.getAttribute("content");return/width=(\d+)/.test(c)&&/height=(\d+)/.test(c)},EPUBJS.Layout.Reflowable=function(){this.documentElement=null,this.spreadWidth=null},EPUBJS.Layout.Reflowable.prototype.format=function(a,b,c,d){var e=EPUBJS.core.prefixed("columnAxis"),f=EPUBJS.core.prefixed("columnGap"),g=EPUBJS.core.prefixed("columnWidth"),h=EPUBJS.core.prefixed("columnFill"),i=Math.floor(b),j=Math.floor(i/8),k=d>=0?d:j%2==0?j:j-1;return 
this.documentElement=a,this.spreadWidth=i+k,a.style.overflow="hidden",a.style.width=i+"px",a.style.height=c+"px",a.style[e]="horizontal",a.style[h]="auto",a.style[g]=i+"px",a.style[f]=k+"px",this.colWidth=i,this.gap=k,{pageWidth:this.spreadWidth,pageHeight:c}},EPUBJS.Layout.Reflowable.prototype.calculatePages=function(){var a,b;return this.documentElement.style.width="auto",a=this.documentElement.scrollWidth,b=Math.ceil(a/this.spreadWidth),{displayedPages:b,pageCount:b}},EPUBJS.Layout.ReflowableSpreads=function(){this.documentElement=null,this.spreadWidth=null},EPUBJS.Layout.ReflowableSpreads.prototype.format=function(a,b,c,d){var e=EPUBJS.core.prefixed("columnAxis"),f=EPUBJS.core.prefixed("columnGap"),g=EPUBJS.core.prefixed("columnWidth"),h=EPUBJS.core.prefixed("columnFill"),i=2,j=Math.floor(b),k=j%2==0?j:j-1,l=Math.floor(k/8),m=d>=0?d:l%2==0?l:l-1,n=Math.floor((k-m)/i);return this.documentElement=a,this.spreadWidth=(n+m)*i,a.style.overflow="hidden",a.style.width=k+"px",a.style.height=c+"px",a.style[e]="horizontal",a.style[h]="auto",a.style[f]=m+"px",a.style[g]=n+"px",this.colWidth=n,this.gap=m,{pageWidth:this.spreadWidth,pageHeight:c}},EPUBJS.Layout.ReflowableSpreads.prototype.calculatePages=function(){var a=this.documentElement.scrollWidth,b=Math.ceil(a/this.spreadWidth);return this.documentElement.style.width=b*this.spreadWidth-this.gap+"px",{displayedPages:b,pageCount:2*b}},EPUBJS.Layout.Fixed=function(){this.documentElement=null},EPUBJS.Layout.Fixed.prototype.format=function(a,b,c,d){var e,f,g,h,i=EPUBJS.core.prefixed("columnWidth"),j=EPUBJS.core.prefixed("transform"),k=EPUBJS.core.prefixed("transformOrigin"),l=a.querySelector("[name=viewport]");this.documentElement=a,l&&l.hasAttribute("content")&&(e=l.getAttribute("content"),f=e.split(","),f[0]&&(g=f[0].replace("width=","")),f[1]&&(h=f[1].replace("height=","")));var m=b/g,n=c/h,o=m=d?g.resolve():(c=f,b=new 
EPUBJS.Chapter(this.spine[c],this.store,this.credentials),this.process(b).then(function(){setTimeout(function(){e(g)},1)})),g.promise}.bind(this);return"number"==typeof a&&(this.break=a),e().then(function(){this.total=this._locations.length-1,this._currentCfi&&(this.currentLocation=this._currentCfi),b.resolve(this._locations)}.bind(this)),b.promise},EPUBJS.Locations.prototype.process=function(a){return a.load().then(function(b){var c,d,e,f=b,g=f.documentElement.querySelector("body"),h=0,i=this.break;this.sprint(g,function(b){var g,j=b.length,k=0;if(0===b.textContent.trim().length)return!1;for(0===h&&(c=f.createRange(),c.setStart(b,0)),g=i-h,g>j&&(h+=j,k=j);k=j?(h+=j-k,k=j):(k+=g,c.setEnd(b,k),e=a.cfiFromRange(c),this._locations.push(e),h=0);d=b}.bind(this)),c&&(c.setEnd(d,d.length),e=a.cfiFromRange(c),this._locations.push(e),h=0)}.bind(this))},EPUBJS.Locations.prototype.sprint=function(a,b){for(var c,d=document.createTreeWalker(a,NodeFilter.SHOW_TEXT,null,!1);c=d.nextNode();)b(c)},EPUBJS.Locations.prototype.locationFromCfi=function(a){return 0===this._locations.length?-1:EPUBJS.core.locationOf(a,this._locations,this.epubcfi.compare)},EPUBJS.Locations.prototype.percentageFromCfi=function(a){var b=this.locationFromCfi(a);return this.percentageFromLocation(b)},EPUBJS.Locations.prototype.percentageFromLocation=function(a){return a&&this.total?a/this.total:0},EPUBJS.Locations.prototype.cfiFromLocation=function(a){var b=-1;return"number"!=typeof a&&(a=parseInt(a)),a>=0&&a1?a/100:a,c=Math.ceil(this.total*b);return this.cfiFromLocation(c)},EPUBJS.Locations.prototype.load=function(a){return this._locations=JSON.parse(a),this.total=this._locations.length-1,this._locations},EPUBJS.Locations.prototype.save=function(a){return JSON.stringify(this._locations)},EPUBJS.Locations.prototype.getCurrent=function(a){return this._current},EPUBJS.Locations.prototype.setCurrent=function(a){var b;if("string"==typeof a)this._currentCfi=a;else{if("number"!=typeof 
a)return;this._current=a}0!==this._locations.length&&("string"==typeof a?(b=this.locationFromCfi(a),this._current=b):b=a,this.trigger("changed",{percentage:this.percentageFromLocation(b)}))},Object.defineProperty(EPUBJS.Locations.prototype,"currentLocation",{get:function(){return this._current},set:function(a){this.setCurrent(a)}}),RSVP.EventTarget.mixin(EPUBJS.Locations.prototype),EPUBJS.Pagination=function(a){this.pages=[],this.locations=[],this.epubcfi=new EPUBJS.EpubCFI,a&&a.length&&this.process(a)},EPUBJS.Pagination.prototype.process=function(a){a.forEach(function(a){this.pages.push(a.page),this.locations.push(a.cfi)},this),this.pageList=a,this.firstPage=parseInt(this.pages[0]),this.lastPage=parseInt(this.pages[this.pages.length-1]),this.totalPages=this.lastPage-this.firstPage},EPUBJS.Pagination.prototype.pageFromCfi=function(a){var b=-1;if(0===this.locations.length)return-1;var c=EPUBJS.core.indexOfSorted(a,this.locations,this.epubcfi.compare);return c!=-1?b=this.pages[c]:(c=EPUBJS.core.locationOf(a,this.locations,this.epubcfi.compare),void 0!==(b=c-1>=0?this.pages[c-1]:this.pages[0])||(b=-1)),b},EPUBJS.Pagination.prototype.cfiFromPage=function(a){var b=-1;"number"!=typeof a&&(a=parseInt(a));var c=this.pages.indexOf(a);return c!=-1&&(b=this.locations[c]),b},EPUBJS.Pagination.prototype.pageFromPercentage=function(a){return Math.round(this.totalPages*a)},EPUBJS.Pagination.prototype.percentageFromPage=function(a){var b=(a-this.firstPage)/this.totalPages;return Math.round(1e3*b)/1e3},EPUBJS.Pagination.prototype.percentageFromCfi=function(a){var b=this.pageFromCfi(a);return this.percentageFromPage(b)},EPUBJS.Parser=function(a){this.baseUrl=a||""},EPUBJS.Parser.prototype.container=function(a){var b,c,d,e;return a?(b=a.querySelector("rootfile"))?(c=b.getAttribute("full-path"),d=EPUBJS.core.uri(c).directory,e=a.xmlEncoding,{packagePath:c,basePath:d,encoding:e}):void console.error("No RootFile Found"):void console.error("Container File Not 
Found")},EPUBJS.Parser.prototype.identifier=function(a){var b;return a?(b=a.querySelector("metadata"),b?this.getElementText(b,"identifier"):void console.error("No Metadata Found")):void console.error("Package File Not Found")},EPUBJS.Parser.prototype.packageContents=function(a,b){var c,d,e,f,g,h,i,j,k,l,m,n=this;return b&&(this.baseUrl=b),a?(c=a.querySelector("metadata"))?(d=a.querySelector("manifest"))?(e=a.querySelector("spine"))?(f=n.manifest(d),g=n.findNavPath(d),h=n.findTocPath(d,e),i=n.findCoverPath(a),j=Array.prototype.indexOf.call(e.parentNode.childNodes,e),k=n.spine(e,f),l={},k.forEach(function(a){l[a.href]=a.index}),m=n.metadata(c),m.direction=e.getAttribute("page-progression-direction"),{metadata:m,spine:k,manifest:f,navPath:g,tocPath:h,coverPath:i,spineNodeIndex:j,spineIndexByURL:l}):void console.error("No Spine Found"):void console.error("No Manifest Found"):void console.error("No Metadata Found"):void console.error("Package File Not Found")},EPUBJS.Parser.prototype.findNavPath=function(a){var b=a.querySelector("item[properties$='nav'], item[properties^='nav '], item[properties*=' nav ']");return!!b&&b.getAttribute("href")},EPUBJS.Parser.prototype.findTocPath=function(a,b){var c,d=a.querySelector("item[media-type='application/x-dtbncx+xml']");return d||(c=b.getAttribute("toc"))&&(d=a.querySelector("item[id='"+c+"']")),!!d&&d.getAttribute("href")},EPUBJS.Parser.prototype.metadata=function(a){var b={},c=this;return 
b.bookTitle=c.getElementText(a,"title"),b.creator=c.getElementText(a,"creator"),b.description=c.getElementText(a,"description"),b.pubdate=c.getElementText(a,"date"),b.publisher=c.getElementText(a,"publisher"),b.identifier=c.getElementText(a,"identifier"),b.language=c.getElementText(a,"language"),b.rights=c.getElementText(a,"rights"),b.modified_date=c.querySelectorText(a,"meta[property='dcterms:modified']"),b.layout=c.querySelectorText(a,"meta[property='rendition:layout']"),b.orientation=c.querySelectorText(a,"meta[property='rendition:orientation']"),b.spread=c.querySelectorText(a,"meta[property='rendition:spread']"),b},EPUBJS.Parser.prototype.findCoverPath=function(a){if("2.0"===a.querySelector("package").getAttribute("version")){var b=a.querySelector('meta[name="cover"]');if(b){var c=b.getAttribute("content"),d=a.querySelector("item[id='"+c+"']");return!!d&&d.getAttribute("href")}return!1}var e=a.querySelector("item[properties='cover-image']");return!!e&&e.getAttribute("href")},EPUBJS.Parser.prototype.getElementText=function(a,b){var c,d=a.getElementsByTagNameNS("http://purl.org/dc/elements/1.1/",b);return d&&0!==d.length?(c=d[0],c.childNodes.length?c.childNodes[0].nodeValue:""):""},EPUBJS.Parser.prototype.querySelectorText=function(a,b){var c=a.querySelector(b);return c&&c.childNodes.length?c.childNodes[0].nodeValue:""},EPUBJS.Parser.prototype.manifest=function(a){var b=this.baseUrl,c={},d=a.querySelectorAll("item");return Array.prototype.slice.call(d).forEach(function(a){var d=a.getAttribute("id"),e=a.getAttribute("href")||"",f=a.getAttribute("media-type")||"",g=a.getAttribute("properties")||"";c[d]={href:e,url:b+e,type:f,properties:g}}),c},EPUBJS.Parser.prototype.spine=function(a,b){var c=a.getElementsByTagName("itemref"),d=Array.prototype.slice.call(c),e=EPUBJS.core.indexOfElementNode(a),f=new EPUBJS.EpubCFI;return d.map(function(a,c){var d=a.getAttribute("idref"),g=f.generateChapterComponent(e,c,d),h=a.getAttribute("properties")||"",i=h.length?h.split(" 
"):[],j=b[d].properties,k=j.length?j.split(" "):[];return{id:d,linear:a.getAttribute("linear")||"",properties:i,manifestProperties:k,href:b[d].href,url:b[d].url,index:c,cfiBase:g,cfi:"epubcfi("+g+")"}})},EPUBJS.Parser.prototype.querySelectorByType=function(a,b,c){var d=a.querySelector(b+'[*|type="'+c+'"]');if(null!==d&&0!==d.length)return d;d=a.querySelectorAll(b);for(var e=0;e1&&d[1],{cfi:f,href:h,packageUrl:e,page:j}):{href:h,page:j}},EPUBJS.Render.Iframe=function(){this.iframe=null,this.document=null,this.window=null,this.docEl=null,this.bodyEl=null,this.leftPos=0,this.pageWidth=0,this.id=EPUBJS.core.uuid()},EPUBJS.Render.Iframe.prototype.create=function(){return this.element=document.createElement("div"),this.element.id="epubjs-view:"+this.id,this.isMobile=navigator.userAgent.match(/(iPad|iPhone|iPod|Mobile|Android)/g),this.transform=EPUBJS.core.prefixed("transform"),this.element},EPUBJS.Render.Iframe.prototype.addIframe=function(){return this.iframe=document.createElement("iframe"),this.iframe.id="epubjs-iframe:"+this.id,this.iframe.scrolling=this.scrolling||"no",this.iframe.seamless="seamless",this.iframe.style.border="none",this.iframe.addEventListener("load",this.loaded.bind(this),!1),(this._width||this._height)&&(this.iframe.height=this._height,this.iframe.width=this._width),this.iframe},EPUBJS.Render.Iframe.prototype.load=function(a,b){var c=this,d=new RSVP.defer;return this.window&&this.unload(),this.iframe&&this.element.removeChild(this.iframe),this.iframe=this.addIframe(),this.element.appendChild(this.iframe),this.iframe.onload=function(a){c.document=c.iframe.contentDocument,c.docEl=c.document.documentElement,c.headEl=c.document.head,c.bodyEl=c.document.body||c.document.querySelector("body"),c.window=c.iframe.contentWindow,c.window.addEventListener("resize",c.resized.bind(c),!1),c.leftPos=0,c.setLeft(0),c.bodyEl&&(c.bodyEl.style.margin="0"),d.resolve(c.docEl)},this.iframe.onerror=function(a){d.reject({message:"Error Loading Contents: "+a,stack:(new 
Error).stack})},this.document=this.iframe.contentDocument,this.document?(this.iframe.contentDocument.open(),this.iframe.contentDocument.write(a),this.iframe.contentDocument.close(),d.promise):(d.reject(new Error("No Document Available")),d.promise)},EPUBJS.Render.Iframe.prototype.loaded=function(a){var b,c,d=this.iframe.contentWindow.location.href;this.document=this.iframe.contentDocument,this.docEl=this.document.documentElement,this.headEl=this.document.head,this.bodyEl=this.document.body||this.document.querySelector("body"),this.window=this.iframe.contentWindow,this.window.focus(),"about:blank"!=d&&(b=this.iframe.contentDocument.querySelector("base"),c=b.getAttribute("href"),this.trigger("render:loaded",c))},EPUBJS.Render.Iframe.prototype.resize=function(a,b){this.element&&(this.element.style.height=b,isNaN(a)||a%2==0||(a+=1),this.element.style.width=a,this.iframe&&(this.iframe.height=b,this.iframe.width=a),this._height=b,this._width=a,this.width=this.element.getBoundingClientRect().width||a,this.height=this.element.getBoundingClientRect().height||b)},EPUBJS.Render.Iframe.prototype.resized=function(a){this.width=this.iframe.getBoundingClientRect().width,this.height=this.iframe.getBoundingClientRect().height},EPUBJS.Render.Iframe.prototype.totalWidth=function(){return this.docEl.scrollWidth},EPUBJS.Render.Iframe.prototype.totalHeight=function(){return this.docEl.scrollHeight},EPUBJS.Render.Iframe.prototype.setPageDimensions=function(a,b){this.pageWidth=a,this.pageHeight=b},EPUBJS.Render.Iframe.prototype.setDirection=function(a){this.direction=a,this.docEl&&"rtl"==this.docEl.dir&&(this.docEl.dir="rtl","pre-paginated"!==this.layout&&(this.docEl.style.position="static",this.docEl.style.right="auto"))},EPUBJS.Render.Iframe.prototype.setLeft=function(a){this.isMobile?this.docEl.style[this.transform]="translate("+-a+"px, 
0)":this.document.defaultView.scrollTo(a,0)},EPUBJS.Render.Iframe.prototype.setLayout=function(a){this.layout=a},EPUBJS.Render.Iframe.prototype.setStyle=function(a,b,c){c&&(a=EPUBJS.core.prefixed(a)),this.bodyEl&&(this.bodyEl.style[a]=b)},EPUBJS.Render.Iframe.prototype.removeStyle=function(a){this.bodyEl&&(this.bodyEl.style[a]="")},EPUBJS.Render.Iframe.prototype.setClasses=function(a){this.bodyEl&&(this.bodyEl.className=a.join(" "))},EPUBJS.Render.Iframe.prototype.addHeadTag=function(a,b,c){var d=c||this.document,e=d.createElement(a),f=d.head;for(var g in b)e.setAttribute(g,b[g]);f&&f.insertBefore(e,f.firstChild)},EPUBJS.Render.Iframe.prototype.page=function(a){this.leftPos=this.pageWidth*(a-1),"rtl"===this.direction&&(this.leftPos=this.leftPos*-1),this.setLeft(this.leftPos)},EPUBJS.Render.Iframe.prototype.getPageNumberByElement=function(a){var b;if(a)return b=this.leftPos+a.getBoundingClientRect().left,Math.floor(b/this.pageWidth)+1},EPUBJS.Render.Iframe.prototype.getPageNumberByRect=function(a){var b;return b=this.leftPos+a.left,Math.floor(b/this.pageWidth)+1},EPUBJS.Render.Iframe.prototype.getBaseElement=function(){return this.bodyEl},EPUBJS.Render.Iframe.prototype.getDocumentElement=function(){return this.docEl},EPUBJS.Render.Iframe.prototype.isElementVisible=function(a){var b,c;return!!(a&&"function"==typeof a.getBoundingClientRect&&(b=a.getBoundingClientRect(),c=b.left,0!==b.width&&0!==b.height&&c>=0&&c=1&&a<=this.displayedPages&&(this.chapterPos=a,this.render.page(a),this.visibleRangeCfi=this.getVisibleRangeCfi(),this.currentLocationCfi=this.visibleRangeCfi.start,this.trigger("renderer:locationChanged",this.currentLocationCfi),this.trigger("renderer:visibleRangeChanged",this.visibleRangeCfi),!0):(console.warn("pageMap not set, queuing"),this._q.enqueue("page",arguments),!0)},EPUBJS.Renderer.prototype.nextPage=function(){return this.page(this.chapterPos+1)},EPUBJS.Renderer.prototype.prevPage=function(){return 
this.page(this.chapterPos-1)},EPUBJS.Renderer.prototype.pageByElement=function(a){var b;a&&(b=this.render.getPageNumberByElement(a),this.page(b))},EPUBJS.Renderer.prototype.lastPage=function(){if(this._moving)return this._q.enqueue("lastPage",arguments);this.page(this.displayedPages)},EPUBJS.Renderer.prototype.firstPage=function(){if(this._moving)return this._q.enqueue("firstPage",arguments);this.page(1)},EPUBJS.Renderer.prototype.section=function(a){var b=this.doc.getElementById(a);b&&this.pageByElement(b)},EPUBJS.Renderer.prototype.firstElementisTextNode=function(a){var b=a.childNodes;return!!(b.length&&b[0]&&3===b[0].nodeType&&b[0].textContent.trim().length)},EPUBJS.Renderer.prototype.isGoodNode=function(a){return["audio","canvas","embed","iframe","img","math","object","svg","video"].indexOf(a.tagName.toLowerCase())!==-1||this.firstElementisTextNode(a)},EPUBJS.Renderer.prototype.walk=function(a,b,c){for(var d,e,f,g,h=a,i=[h],j=1e4,k=0;!d&&i.length;){if(a=i.shift(),this.containsPoint(a,b,c)&&this.isGoodNode(a)&&(d=a),!d&&a&&a.childElementCount>0){if(!(e=a.children)||!e.length)return d;f=e.length?e.length:0;for(var l=f-1;l>=0;l--)e[l]!=g&&i.unshift(e[l])}if(!d&&0===i.length&&h&&null!==h.parentNode&&(i.push(h.parentNode),g=h,h=h.parentNode),++k>j){console.error("ENDLESS LOOP");break}}return d},EPUBJS.Renderer.prototype.containsPoint=function(a,b,c){var d;return!!(a&&"function"==typeof a.getBoundingClientRect&&(d=a.getBoundingClientRect(),0!==d.width&&0!==d.height&&d.left>=b&&b<=d.left+d.width))},EPUBJS.Renderer.prototype.textSprint=function(a,b){var c,d,e=function(a){return/^\s*$/.test(a.data)?NodeFilter.FILTER_REJECT:NodeFilter.FILTER_ACCEPT};try{for(c=document.createTreeWalker(a,NodeFilter.SHOW_TEXT,{acceptNode:e},!1);d=c.nextNode();)b(d)}catch(f){for(c=document.createTreeWalker(a,NodeFilter.SHOW_TEXT,e,!1);d=c.nextNode();)b(d)}},EPUBJS.Renderer.prototype.sprint=function(a,b){for(var 
c,d=document.createTreeWalker(a,NodeFilter.SHOW_ELEMENT,null,!1);c=d.nextNode();)b(c)},EPUBJS.Renderer.prototype.mapPage=function(){var a,b,c,d,e,f,g,h,i=this,j=[],k=this.render.getBaseElement(),l=1,m=this.layout.colWidth+this.layout.gap,n=this.formated.pageWidth*(this.chapterPos-1),o=m*l-n,p=0,q=function(b){var c,e,f;if(b.nodeType==Node.TEXT_NODE){if(e=document.createRange(),e.selectNodeContents(b),!(c=e.getBoundingClientRect())||0===c.width&&0===c.height)return;c.left>p&&(f=r(b)),c.right>p&&(f=r(b)),d=b,f&&(a=null)}},r=function(e){var f;return i.splitTextNodeIntoWordsRanges(e).forEach(function(e){var g=e.getBoundingClientRect();!g||0===g.width&&0===g.height||(g.left+g.width0&&(b&&(b.setEnd(a,e),c.push(b)),b=this.doc.createRange(),b.setStart(a,e+1));return b&&(b.setEnd(a,d.length),c.push(b)),c},EPUBJS.Renderer.prototype.rangePosition=function(a){var b;return b=a.getClientRects(),b.length?b[0]:null},EPUBJS.Renderer.prototype.getPageCfi=function(){var a=2*this.chapterPos-1;return this.pageMap[a].start},EPUBJS.Renderer.prototype.getRange=function(a,b,c){var d,e=this.doc.createRange();return c=!0,void 0===document.caretPositionFromPoint||c?void 0===document.caretRangeFromPoint||c?(this.visibileEl=this.findElementAfter(a,b),e.setStart(this.visibileEl,1)):e=this.doc.caretRangeFromPoint(a,b):(d=this.doc.caretPositionFromPoint(a,b),e.setStart(d.offsetNode,d.offset)),e},EPUBJS.Renderer.prototype.pagesInCurrentChapter=function(){return this.pageMap?this.pageMap.length:(console.warn("page map not loaded"),!1)},EPUBJS.Renderer.prototype.currentRenderedPage=function(){return this.pageMap?this.spreads&&this.pageMap.length>1?2*this.chapterPos-1:this.chapterPos:(console.warn("page map not loaded"),!1)},EPUBJS.Renderer.prototype.getRenderedPagesLeft=function(){var a,b;return this.pageMap?(b=this.pageMap.length,a=this.spreads?2*this.chapterPos-1:this.chapterPos,b-a):(console.warn("page map not loaded"),!1)},EPUBJS.Renderer.prototype.getVisibleRangeCfi=function(){var a,b,c;return 
this.pageMap?(this.spreads?(a=2*this.chapterPos,b=this.pageMap[a-2],c=b,this.pageMap.length>1&&this.pageMap.length>a-1&&(c=this.pageMap[a-1])):(a=this.chapterPos,b=this.pageMap[a-1],c=b),b||(console.warn("page range miss:",a,this.pageMap),b=this.pageMap[this.pageMap.length-1],c=b),{start:b.start,end:c.end}):(console.warn("page map not loaded"),!1)},EPUBJS.Renderer.prototype.gotoCfi=function(a){var b,c,d;if(this._moving)return this._q.enqueue("gotoCfi",arguments);if(EPUBJS.core.isString(a)&&(a=this.epubcfi.parse(a)),void 0===document.evaluate)(c=this.epubcfi.addMarker(a,this.doc))&&(b=this.render.getPageNumberByElement(c),this.epubcfi.removeMarker(c,this.doc),this.page(b));else if(d=this.epubcfi.generateRangeFromCfi(a,this.doc)){var e=d.getBoundingClientRect();b=e?this.render.getPageNumberByRect(e):1,this.page(b),this.currentLocationCfi=a.str}else this.page(1)},EPUBJS.Renderer.prototype.findFirstVisible=function(a){var b,c=a||this.render.getBaseElement();return b=this.walk(c,0,0),b?b:a},EPUBJS.Renderer.prototype.findElementAfter=function(a,b,c){var d,e=c||this.render.getBaseElement();return d=this.walk(e,a,b),d?d:e},EPUBJS.Renderer.prototype.resize=function(a,b,c){this.width=a,this.height=b,c!==!1&&this.render.resize(this.width,this.height),this.contents&&this.reformat(),this.trigger("renderer:resized",{width:this.width,height:this.height})},EPUBJS.Renderer.prototype.onResized=function(a){this.trigger("renderer:beforeResize");var 
b=this.container.clientWidth,c=this.container.clientHeight;this.resize(b,c,!1)},EPUBJS.Renderer.prototype.addEventListeners=function(){this.render.document&&this.listenedEvents.forEach(function(a){this.render.document.addEventListener(a,this.triggerEvent.bind(this),!1)},this)},EPUBJS.Renderer.prototype.removeEventListeners=function(){this.render.document&&this.listenedEvents.forEach(function(a){this.render.document.removeEventListener(a,this.triggerEvent,!1)},this)},EPUBJS.Renderer.prototype.triggerEvent=function(a){this.trigger("renderer:"+a.type,a)},EPUBJS.Renderer.prototype.addSelectionListeners=function(){this.render.document.addEventListener("selectionchange",this.onSelectionChange.bind(this),!1)},EPUBJS.Renderer.prototype.removeSelectionListeners=function(){this.render.document&&this.doc.removeEventListener("selectionchange",this.onSelectionChange,!1)},EPUBJS.Renderer.prototype.onSelectionChange=function(a){this.selectionEndTimeout&&clearTimeout(this.selectionEndTimeout),this.selectionEndTimeout=setTimeout(function(){this.selectedRange=this.render.window.getSelection(),this.trigger("renderer:selected",this.selectedRange)}.bind(this),500)},EPUBJS.Renderer.prototype.setMinSpreadWidth=function(a){this.minSpreadWidth=a,this.spreads=this.determineSpreads(a)},EPUBJS.Renderer.prototype.determineSpreads=function(a){return!(this.isForcedSingle||!a||this.width=d?h.resolve():(c=a[e].url,g=window.encodeURIComponent(c),EPUBJS.core.request(c,"binary").then(function(a){return localforage.setItem(g,a)}).then(function(a){e++,setTimeout(function(){f(h)},1)})),h.promise}.bind(this);return Array.isArray(a)||(a=[a]),f().then(function(){c.resolve()}.bind(this)),c.promise},EPUBJS.Storage.prototype.token=function(a,b){var c=window.encodeURIComponent(a);return localforage.setItem(c,b).then(function(a){return null!==a})},EPUBJS.Storage.prototype.isStored=function(a){var b=window.encodeURIComponent(a);return localforage.getItem(b).then(function(a){return 
null!==a})},EPUBJS.Storage.prototype.getText=function(a){var b=window.encodeURIComponent(a);return EPUBJS.core.request(a,"arraybuffer",this.withCredentials).then(function(a){return this.offline&&(this.offline=!1,this.trigger("offline",!1)),localforage.setItem(b,a),a}.bind(this)).then(function(b){var c=new RSVP.defer,d=EPUBJS.core.getMimeType(a),e=new Blob([b],{type:d}),f=new FileReader;return f.addEventListener("loadend",function(){c.resolve(f.result)}),f.readAsText(e,d),c.promise}).catch(function(){var c=new RSVP.defer,d=localforage.getItem(b);return this.offline||(this.offline=!0,this.trigger("offline",!0)),d?(d.then(function(b){var d=EPUBJS.core.getMimeType(a),e=new Blob([b],{type:d}),f=new FileReader;f.addEventListener("loadend",function(){c.resolve(f.result)}),f.readAsText(e,d)}),c.promise):(c.reject({message:"File not found in the storage: "+a,stack:(new Error).stack}),c.promise)}.bind(this))},EPUBJS.Storage.prototype.getUrl=function(a){var b=window.encodeURIComponent(a);return EPUBJS.core.request(a,"arraybuffer",this.withCredentials).then(function(c){return this.offline&&(this.offline=!1,this.trigger("offline",!1)),localforage.setItem(b,c),a}.bind(this)).catch(function(){var c,d,e=new RSVP.defer,f=window.URL||window.webkitURL||window.mozURL;return this.offline||(this.offline=!0,this.trigger("offline",!0)),b in this.urlCache?(e.resolve(this.urlCache[b]),e.promise):(c=localforage.getItem(b))?(c.then(function(c){var g=new Blob([c],{type:EPUBJS.core.getMimeType(a)});d=f.createObjectURL(g),e.resolve(d),this.urlCache[b]=d}.bind(this)),e.promise):(e.reject({message:"File not found in the storage: "+a,stack:(new Error).stack}),e.promise)}.bind(this))},EPUBJS.Storage.prototype.getXml=function(a){var b=window.encodeURIComponent(a);return EPUBJS.core.request(a,"arraybuffer",this.withCredentials).then(function(a){return this.offline&&(this.offline=!1,this.trigger("offline",!1)),localforage.setItem(b,a),a}.bind(this)).then(function(b){var c=new 
RSVP.defer,d=EPUBJS.core.getMimeType(a),e=new Blob([b],{type:d}),f=new FileReader;return f.addEventListener("loadend",function(){var a=new DOMParser,b=a.parseFromString(f.result,"text/xml");c.resolve(b)}),f.readAsText(e,d),c.promise}).catch(function(){var c=new RSVP.defer,d=localforage.getItem(b);return this.offline||(this.offline=!0,this.trigger("offline",!0)),d?(d.then(function(b){var d=EPUBJS.core.getMimeType(a),e=new Blob([b],{type:d}),f=new FileReader;f.addEventListener("loadend",function(){var a=new DOMParser,b=a.parseFromString(f.result,"text/xml");c.resolve(b)}),f.readAsText(e,d)}),c.promise):(c.reject({message:"File not found in the storage: "+a,stack:(new Error).stack}),c.promise)}.bind(this))},EPUBJS.Storage.prototype.revokeUrl=function(a){var b=window.URL||window.webkitURL||window.mozURL,c=this.urlCache[a];c&&b.revokeObjectURL(c)},EPUBJS.Storage.prototype.failed=function(a){console.error(a)},RSVP.EventTarget.mixin(EPUBJS.Storage.prototype),EPUBJS.Unarchiver=function(a){this.checkRequirements(),this.urlCache={}},EPUBJS.Unarchiver.prototype.checkRequirements=function(a){"undefined"==typeof JSZip&&console.error("JSZip lib not loaded")},EPUBJS.Unarchiver.prototype.open=function(a,b){if(a instanceof ArrayBuffer){this.zip=new JSZip(a);var c=new RSVP.defer;return c.resolve(),c.promise}return EPUBJS.core.request(a,"binary").then(function(a){this.zip=new JSZip(a)}.bind(this))},EPUBJS.Unarchiver.prototype.getXml=function(a,b){var c=window.decodeURIComponent(a);return this.getText(c,b).then(function(b){var c=new DOMParser,d=EPUBJS.core.getMimeType(a);return 65279===b.charCodeAt(0)&&(b=b.slice(1)),c.parseFromString(b,d)})},EPUBJS.Unarchiver.prototype.getUrl=function(a,b){var c,d,e=this,f=new RSVP.defer,g=window.decodeURIComponent(a),h=this.zip.file(g),i=window.URL||window.webkitURL||window.mozURL;return h?a in this.urlCache?(f.resolve(this.urlCache[a]),f.promise):(d=new 
Blob([h.asUint8Array()],{type:EPUBJS.core.getMimeType(h.name)}),c=i.createObjectURL(d),f.resolve(c),e.urlCache[a]=c,f.promise):(f.reject({message:"File not found in the epub: "+a,stack:(new Error).stack}),f.promise)},EPUBJS.Unarchiver.prototype.getText=function(a,b){var c,d=new RSVP.defer,e=window.decodeURIComponent(a),f=this.zip.file(e);return f?(c=f.asText(),d.resolve(c),d.promise):(d.reject({message:"File not found in the epub: "+a,stack:(new Error).stack}),d.promise)},EPUBJS.Unarchiver.prototype.revokeUrl=function(a){var b=window.URL||window.webkitURL||window.mozURL,c=this.urlCache[a];c&&b.revokeObjectURL(c)},EPUBJS.Unarchiver.prototype.failed=function(a){console.error(a)},EPUBJS.Unarchiver.prototype.afterSaved=function(a){this.callback()},EPUBJS.Unarchiver.prototype.toStorage=function(a){function b(){0===--e&&d.afterSaved()}var c=0,d=this,e=a.length;a.forEach(function(a){setTimeout(function(a){d.saveEntryFileToStorage(a,b)},c,a),c+=20}),console.log("time",c)},function(){var a={application:{ecmascript:["es","ecma"],javascript:"js",ogg:"ogx",pdf:"pdf",postscript:["ps","ai","eps","epsi","epsf","eps2","eps3"],"rdf+xml":"rdf",smil:["smi","smil"],"xhtml+xml":["xhtml","xht"],xml:["xml","xsl","xsd","opf","ncx"],zip:"zip","x-httpd-eruby":"rhtml","x-latex":"latex","x-maker":["frm","maker","frame","fm","fb","book","fbdoc"],"x-object":"o","x-shockwave-flash":["swf","swfl"],"x-silverlight":"scr","epub+zip":"epub","font-tdpfr":"pfr","inkml+xml":["ink","inkml"],json:"json","jsonml+json":"jsonml","mathml+xml":"mathml","metalink+xml":"metalink",mp4:"mp4s","omdoc+xml":"omdoc",oxps:"oxps","vnd.amazon.ebook":"azw",widget:"wgt","x-dtbook+xml":"dtb","x-dtbresource+xml":"res","x-font-bdf":"bdf","x-font-ghostscript":"gsf","x-font-linux-psf":"psf","x-font-otf":"otf","x-font-pcf":"pcf","x-font-snf":"snf","x-font-ttf":["ttf","ttc"],"x-font-type1":["pfa","pfb","pfm","afm"],"x-font-woff":"woff","x-mobipocket-ebook":["prc","mobi"],"x-mspublisher":"pub","x-nzb":"nzb","x-tgif":"obj","xaml+xml
":"xaml","xml-dtd":"dtd","xproc+xml":"xpl","xslt+xml":"xslt","internet-property-stream":"acx","x-compress":"z","x-compressed":"tgz","x-gzip":"gz"},audio:{flac:"flac",midi:["mid","midi","kar","rmi"],mpeg:["mpga","mpega","mp2","mp3","m4a","mp2a","m2a","m3a"],mpegurl:"m3u",ogg:["oga","ogg","spx"],"x-aiff":["aif","aiff","aifc"],"x-ms-wma":"wma","x-wav":"wav",adpcm:"adp",mp4:"mp4a",webm:"weba","x-aac":"aac","x-caf":"caf","x-matroska":"mka","x-pn-realaudio-plugin":"rmp",xm:"xm",mid:["mid","rmi"]},image:{gif:"gif",ief:"ief",jpeg:["jpeg","jpg","jpe"],pcx:"pcx",png:"png","svg+xml":["svg","svgz"],tiff:["tiff","tif"],"x-icon":"ico",bmp:"bmp",webp:"webp","x-pict":["pic","pct"],"x-tga":"tga","cis-cod":"cod"},message:{rfc822:["eml","mime","mht","mhtml","nws"]},text:{"cache-manifest":["manifest","appcache"],calendar:["ics","icz","ifb"],css:"css",csv:"csv",h323:"323",html:["html","htm","shtml","stm"],iuls:"uls",mathml:"mml",plain:["txt","text","brf","conf","def","list","log","in","bas"],richtext:"rtx","tab-separated-values":"tsv","x-bibtex":"bib","x-dsrc":"d","x-diff":["diff","patch"],"x-haskell":"hs","x-java":"java","x-literate-haskell":"lhs","x-moc":"moc","x-pascal":["p","pas"],"x-pcs-gcd":"gcd","x-perl":["pl","pm"],"x-python":"py","x-scala":"scala","x-setext":"etx","x-tcl":["tcl","tk"],"x-tex":["tex","ltx","sty","cls"],"x-vcard":"vcf",sgml:["sgml","sgm"],"x-c":["c","cc","cxx","cpp","h","hh","dic"],"x-fortran":["f","for","f77","f90"],"x-opml":"opml","x-nfo":"nfo","x-sfv":"sfv","x-uuencode":"uu",webviewhtml:"htt"},video:{mpeg:["mpeg","mpg","mpe","m1v","m2v","mp2","mpa","mpv2"],mp4:["mp4","mp4v","mpg4"],quicktime:["qt","mov"],ogg:"ogv","vnd.mpegurl":["mxu","m4u"],"x-flv":"flv","x-la-asf":["lsf","lsx"],"x-mng":"mng","x-ms-asf":["asf","asx","asr"],"x-ms-wm":"wm","x-ms-wmv":"wmv","x-ms-wmx":"wmx","x-ms-wvx":"wvx","x-msvideo":"avi","x-sgi-movie":"movie","x-matroska":["mpv","mkv","mk3d","mks"],"3gpp2":"3g2",h261:"h261",h263:"h263",h264:"h264",jpeg:"jpgv",jpm:["jpm","jpgm"],mj2:["mj2","m
jp2"],"vnd.ms-playready.media.pyv":"pyv","vnd.uvvu.mp4":["uvu","uvvu"],"vnd.vivo":"viv",webm:"webm","x-f4v":"f4v","x-m4v":"m4v","x-ms-vob":"vob","x-smv":"smv"}},b=function(){var b,c,d,e,f={};for(b in a)if(a.hasOwnProperty(b))for(c in a[b])if(a[b].hasOwnProperty(c))if("string"==typeof(d=a[b][c]))f[d]=b+"/"+c;else for(e=0;en/2.5&&(p=n/2.5,pop_content.style.maxHeight=p+"px"),popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),k-popRect.width<=0?(c.style.left=k+"px",c.classList.add("left")):c.classList.remove("left"),k+popRect.width/2>=o?(c.style.left=k-300+"px",popRect=c.getBoundingClientRect(),c.style.left=k-popRect.width+"px",popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),c.classList.add("right")):c.classList.remove("right")}function d(){f[i].classList.add("on")}function e(){f[i].classList.remove("on")}function g(){setTimeout(function(){f[i].classList.remove("show")},100)}var h,i,j,k,l,m;"noteref"==a.getAttribute("epub:type")&&(h=a.getAttribute("href"),i=h.replace("#",""),j=b.render.document.getElementById(i),a.addEventListener("mouseover",c,!1),a.addEventListener("mouseout",g,!1))}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").mathml=function(a,b){if(b.currentChapter.manifestProperties.indexOf("mathml")!==-1){b.render.iframe.contentWindow.mathmlCallback=a;var c=document.createElement("script");c.type="text/x-mathjax-config",c.innerHTML=' MathJax.Hub.Register.StartupHook("End",function () { window.mathmlCallback(); }); MathJax.Hub.Config({jax: ["input/TeX","input/MathML","output/SVG"],extensions: ["tex2jax.js","mml2jax.js","MathEvents.js"],TeX: {extensions: ["noErrors.js","noUndefined.js","autoload-all.js"]},MathMenu: {showRenderer: false},menuSettings: {zoom: "Click"},messageStyle: "none"}); 
',b.doc.body.appendChild(c),EPUBJS.core.addScript("http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML",null,b.doc.head)}else a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").smartimages=function(a,b){var c=b.contents.querySelectorAll("img"),d=Array.prototype.slice.call(c),e=b.height;if("reflowable"!=b.layoutSettings.layout)return void a();d.forEach(function(a){var c=function(){var c,d=a.getBoundingClientRect(),f=d.height,g=d.top,h=a.getAttribute("data-height"),i=h||f,j=Number(getComputedStyle(a,"").fontSize.match(/(\d*(\.\d*)?)px/)[1]),k=j?j/2:0;e=b.contents.clientHeight,g<0&&(g=0),a.style.maxWidth="100%",i+g>=e?(ge&&(a.style.maxHeight=e+"px",a.style.width="auto",d=a.getBoundingClientRect(),i=d.height),a.style.display="block",a.style.WebkitColumnBreakBefore="always",a.style.breakBefore="column"),a.setAttribute("data-height",c)):(a.style.removeProperty("max-height"),a.style.removeProperty("margin-top"))},d=function(){b.off("renderer:resized",c),b.off("renderer:chapterUnload",this)};a.addEventListener("load",c,!1),b.on("renderer:resized",c),b.on("renderer:chapterUnload",d),c()}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").transculsions=function(a,b){var c=b.contents.querySelectorAll("[transclusion]");Array.prototype.slice.call(c).forEach(function(a){function c(){j=g,k=h,j>chapter.colWidth&&(d=chapter.colWidth/j,j=chapter.colWidth,k*=d),f.width=j,f.height=k}var d,e=a.getAttribute("ref"),f=document.createElement("iframe"),g=a.getAttribute("width"),h=a.getAttribute("height"),i=a.parentNode,j=g,k=h;c(),b.listenUntil("renderer:resized","renderer:chapterUnloaded",c),f.src=e,i.replaceChild(f,a)}),a&&a()}; \ No newline at end of file diff --git a/static/js/reader/hooks.min.map b/static/js/reader/hooks.min.map deleted file mode 100644 index 5da22bee6..000000000 --- a/static/js/reader/hooks.min.map +++ /dev/null @@ -1 +0,0 @@ 
-{"version":3,"file":"hooks.min.js","sources":["../../hooks/default/endnotes.js","../../hooks/default/mathml.js","../../hooks/default/smartimages.js","../../hooks/default/transculsions.js"],"names":["EPUBJS","Hooks","register","endnotes","callback","renderer","notes","contents","querySelectorAll","items","Array","prototype","slice","call","attr","type","folder","core","location","pathname","popups","cssPath","addCss","render","document","head","forEach","item","showPop","pop","itemRect","iheight","height","iwidth","width","maxHeight","txt","el","cloneNode","querySelector","id","createElement","setAttribute","pop_content","appendChild","body","addEventListener","onPop","offPop","on","hidePop","this","getBoundingClientRect","left","top","classList","add","popRect","style","remove","setTimeout","href","epubType","getAttribute","replace","getElementById","mathml","currentChapter","manifestProperties","indexOf","iframe","contentWindow","mathmlCallback","s","innerHTML","doc","addScript","smartimages","images","layoutSettings","layout","size","newHeight","rectHeight","oHeight","fontSize","Number","getComputedStyle","match","fontAdjust","clientHeight","display","removeProperty","unloaded","off","transculsions","trans","orginal_width","orginal_height","chapter","colWidth","ratio","src","parent","parentNode","listenUntil","replaceChild"],"mappings":"AAAAA,OAAOC,MAAMC,SAAS,wBAAwBC,SAAW,SAASC,EAAUC,GAE1E,GAAIC,GAAQD,EAASE,SAASC,iBAAiB,WAC9CC,EAAQC,MAAMC,UAAUC,MAAMC,KAAKP,GACnCQ,EAAO,YACPC,EAAO,UACPC,EAAShB,OAAOiB,KAAKD,OAAOE,SAASC,UAErCC,GADWJ,EAAShB,OAAOqB,SAAYL,KAGxChB,QAAOiB,KAAKK,OAAOtB,OAAOqB,QAAU,aAAa,EAAOhB,EAASkB,OAAOC,SAASC,MAGjFhB,EAAMiB,QAAQ,SAASC,GAqBtB,QAASC,KACR,GAICC,GAEAC,EALAC,EAAU1B,EAAS2B,OACnBC,EAAS5B,EAAS6B,MAGlBC,EAAY,GAGTC,KACHP,EAAMQ,EAAGC,WAAU,GACnBF,EAAMP,EAAIU,cAAc,MAKrBnB,EAAOoB,KACVpB,EAAOoB,GAAMhB,SAASiB,cAAc,OACpCrB,EAAOoB,GAAIE,aAAa,QAAS,SAEjCC,YAAcnB,SAASiB,cAAc,OAErCrB,EAAOoB,GAAII,YAAYD,aAEvBA,YAAYC,YAAYR,GACxBO,YAAYD,aAAa,QAAS,eAElCrC,EAASkB,
OAAOC,SAASqB,KAAKD,YAAYxB,EAAOoB,IAGjDpB,EAAOoB,GAAIM,iBAAiB,YAAaC,GAAO,GAChD3B,EAAOoB,GAAIM,iBAAiB,WAAYE,GAAQ,GAKhD3C,EAAS4C,GAAG,uBAAwBC,EAASC,MAC7C9C,EAAS4C,GAAG,uBAAwBD,EAAQG,OAI7CtB,EAAMT,EAAOoB,GAIbV,EAAWH,EAAKyB,wBAChBC,EAAOvB,EAASuB,KAChBC,EAAMxB,EAASwB,IAGfzB,EAAI0B,UAAUC,IAAI,QAGlBC,QAAU5B,EAAIuB,wBAGdvB,EAAI6B,MAAML,KAAOA,EAAOI,QAAQvB,MAAQ,EAAI,KAC5CL,EAAI6B,MAAMJ,IAAMA,EAAM,KAInBnB,EAAYJ,EAAU,MACxBI,EAAYJ,EAAU,IACtBY,YAAYe,MAAMvB,UAAYA,EAAY,MAIxCsB,QAAQzB,OAASsB,GAAOvB,EAAU,IACpCF,EAAI6B,MAAMJ,IAAMA,EAAMG,QAAQzB,OAAU,KACxCH,EAAI0B,UAAUC,IAAI,UAElB3B,EAAI0B,UAAUI,OAAO,SAInBN,EAAOI,QAAQvB,OAAS,GAC1BL,EAAI6B,MAAML,KAAOA,EAAO,KACxBxB,EAAI0B,UAAUC,IAAI,SAElB3B,EAAI0B,UAAUI,OAAO,QAInBN,EAAOI,QAAQvB,MAAQ,GAAKD,GAE9BJ,EAAI6B,MAAML,KAAOA,EAAO,IAAM,KAE9BI,QAAU5B,EAAIuB,wBACdvB,EAAI6B,MAAML,KAAOA,EAAOI,QAAQvB,MAAQ,KAErCuB,QAAQzB,OAASsB,GAAOvB,EAAU,IACpCF,EAAI6B,MAAMJ,IAAMA,EAAMG,QAAQzB,OAAU,KACxCH,EAAI0B,UAAUC,IAAI,UAElB3B,EAAI0B,UAAUI,OAAO,SAGtB9B,EAAI0B,UAAUC,IAAI,UAElB3B,EAAI0B,UAAUI,OAAO,SAMvB,QAASZ,KACR3B,EAAOoB,GAAIe,UAAUC,IAAI,MAG1B,QAASR,KACR5B,EAAOoB,GAAIe,UAAUI,OAAO,MAG7B,QAAST,KACRU,WAAW,WACVxC,EAAOoB,GAAIe,UAAUI,OAAO,SAC1B,KAxIJ,GACCE,GACArB,EACAH,EAGAgB,EACAC,EACAlB,EARG0B,EAAWnC,EAAKoC,aAAajD,EAU9BgD,IAAY/C,IAEf8C,EAAOlC,EAAKoC,aAAa,QACzBvB,EAAKqB,EAAKG,QAAQ,IAAK,IACvB3B,EAAKhC,EAASkB,OAAOC,SAASyC,eAAezB,GAG7Cb,EAAKmB,iBAAiB,YAAalB,GAAS,GAC5CD,EAAKmB,iBAAiB,WAAYI,GAAS,MA4HzC9C,GAAUA,KC5JfJ,OAAOC,MAAMC,SAAS,wBAAwBgE,OAAS,SAAS9D,EAAUC,GAGtE,GAAoE,KAAjEA,EAAS8D,eAAeC,mBAAmBC,QAAQ,UAAkB,CAGpEhE,EAASkB,OAAO+C,OAAOC,cAAcC,eAAiBpE,CAGtD,IAAIqE,GAAIjD,SAASiB,cAAc,SAC/BgC,GAAE1D,KAAO,wBACT0D,EAAEC,UAAY,6ZAMdrE,EAASsE,IAAI9B,KAAKD,YAAY6B,GAE9BzE,OAAOiB,KAAK2D,UAAU,gFAAiF,KAAMvE,EAASsE,IAAIlD,UAGvHrB,IAAUA,KCtBrBJ,OAAOC,MAAMC,SAAS,wBAAwB2E,YAAc,SAASzE,EAAUC,GAC7E,GAAIyE,GAASzE,EAASE,SAASC,iBAAiB,OAC/CC,EAAQC,MAAMC,UAAUC,MAAMC,KAAKiE,GACnC/C,EAAU1B,EAAS2B,MAGpB,OAAqC,cAAlC3B,EAAS0E,eAAeC,WAC1B5E,MAIDK,EAAMiB,QAAQ,SAASC,GAEtB,GAAIsD,GAAO,WACV,GAKCC,GALGpD,EAAWH,EA
AKyB,wBACnB+B,EAAarD,EAASE,OACtBsB,EAAMxB,EAASwB,IACf8B,EAAUzD,EAAKoC,aAAa,eAC5B/B,EAASoD,GAAWD,EAEpBE,EAAWC,OAAOC,iBAAiB5D,EAAM,IAAI0D,SAASG,MAAM,mBAAmB,IAC/EC,EAAaJ,EAAWA,EAAW,EAAI,CAExCtD,GAAU1B,EAASE,SAASmF,aACnB,EAANpC,IAASA,EAAM,GAEftB,EAASsB,GAAOvB,GAETA,EAAQ,EAAduB,GAEF4B,EAAYnD,EAAUuB,EAAMmC,EAC5B9D,EAAK+B,MAAMvB,UAAY+C,EAAY,KACnCvD,EAAK+B,MAAMxB,MAAO,SAEfF,EAASD,IACXJ,EAAK+B,MAAMvB,UAAYJ,EAAU,KACjCJ,EAAK+B,MAAMxB,MAAO,OAClBJ,EAAWH,EAAKyB,wBAChBpB,EAASF,EAASE,QAEnBL,EAAK+B,MAAMiC,QAAU,QACrBhE,EAAK+B,MAA+B,wBAAI,SACxC/B,EAAK+B,MAAmB,YAAI,UAI7B/B,EAAKe,aAAa,cAAewC,KAGjCvD,EAAK+B,MAAMkC,eAAe,cAC1BjE,EAAK+B,MAAMkC,eAAe,gBAIxBC,EAAW,WAEdxF,EAASyF,IAAI,mBAAoBb,GACjC5E,EAASyF,IAAI,yBAA0B3C,MAGxCxB,GAAKmB,iBAAiB,OAAQmC,GAAM,GAEpC5E,EAAS4C,GAAG,mBAAoBgC,GAEhC5E,EAAS4C,GAAG,yBAA0B4C,GAEtCZ,WAIE7E,GAAUA,OCtEfJ,OAAOC,MAAMC,SAAS,wBAAwB6F,cAAgB,SAAS3F,EAAUC,GAO/E,GAAI2F,GAAQ3F,EAASE,SAASC,iBAAiB,kBAC7CC,EAAQC,MAAMC,UAAUC,MAAMC,KAAKmF,EAErCvF,GAAMiB,QAAQ,SAASC,GAWtB,QAASsD,KACR/C,EAAQ+D,EACRjE,EAASkE,EAENhE,EAAQiE,QAAQC,WAClBC,EAAQF,QAAQC,SAAWlE,EAE3BA,EAAQiE,QAAQC,SAChBpE,GAAkBqE,GAGnB/B,EAAOpC,MAAQA,EACfoC,EAAOtC,OAASA,EAtBjB,GAOCqE,GAPGC,EAAM3E,EAAKoC,aAAa,OAC3BO,EAAS9C,SAASiB,cAAc,UAChCwD,EAAgBtE,EAAKoC,aAAa,SAClCmC,EAAiBvE,EAAKoC,aAAa,UACnCwC,EAAS5E,EAAK6E,WACdtE,EAAQ+D,EACRjE,EAASkE,CAoBVjB,KAKA5E,EAASoG,YAAY,mBAAoB,2BAA4BxB,GAErEX,EAAOgC,IAAMA,EAGbC,EAAOG,aAAapC,EAAQ3C,KAQ1BvB,GAAUA"} \ No newline at end of file diff --git a/static/js/reader/hooks/extensions/highlight.js b/static/js/reader/hooks/extensions/highlight.js deleted file mode 100644 index 1dd1c6714..000000000 --- a/static/js/reader/hooks/extensions/highlight.js +++ /dev/null @@ -1,14 +0,0 @@ -EPUBJS.Hooks.register("beforeChapterDisplay").highlight = function(callback, renderer){ - - // EPUBJS.core.addScript("js/libs/jquery.highlight.js", null, renderer.doc.head); - - var s = document.createElement("style"); - s.innerHTML =".highlight { background: yellow; font-weight: normal; }"; - - 
renderer.render.document.head.appendChild(s); - - if(callback) callback(); - -} - - diff --git a/static/js/reader/libs/jquery.min.js b/static/js/reader/libs/jquery.min.js deleted file mode 100644 index 4024b6622..000000000 --- a/static/js/reader/libs/jquery.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! jQuery v2.2.4 | (c) jQuery Foundation | jquery.org/license */ -!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="2.2.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in 
a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isPlainObject:function(a){var b;if("object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype||{},"isPrototypeOf"))return!1;for(b in a);return void 0===b||k.call(a,b)},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=d.createElement("script"),b.text=a,d.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:h.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in 
a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(d=e.call(arguments,2),f=function(){return a.apply(b||this,d.concat(e.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:l}),"function"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new 
RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" "+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var 
b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return 
c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML="",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return 
d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return 
a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return 
d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(Q,"$1"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return sa(m)}function xa(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof 
a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ia(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||ja("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ja("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return null==a.getAttribute("disabled")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,y=/^.[^:#\[\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof 
b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return h.call(b,a)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,"string"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,"string"==typeof a){if(e="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}return f=d.getElementById(e[2]),f&&f.parentNode&&(this.length=1,this[0]=f),this.context=d,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?void 0!==c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||"string"!=typeof 
a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?h.call(n(a),this[0]):h.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,"parentNode")},parentsUntil:function(a,b,c){return u(a,"parentNode",c)},next:function(a){return F(a,"nextSibling")},prev:function(a){return F(a,"previousSibling")},nextAll:function(a){return u(a,"nextSibling")},prevAll:function(a){return u(a,"previousSibling")},nextUntil:function(a,b,c){return u(a,"nextSibling",c)},prevUntil:function(a,b,c){return u(a,"previousSibling",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||n.uniqueSort(e),D.test(a)&&e.reverse()),this.pushStack(e)}});var G=/\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return 
e=g=[],c||(f=c=""),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return 
n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler("ready"),n(d).off("ready"))))}});function J(){d.removeEventListener("DOMContentLoaded",J),a.removeEventListener("load",J),n.ready()}n.ready.promise=function(b){return I||(I=n.Deferred(),"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll?a.setTimeout(n.ready):(d.addEventListener("DOMContentLoaded",J),a.addEventListener("load",J))),I.promise(b)},n.ready.promise();var K=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)K(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},L=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function M(){this.expando=n.expando+M.uid++}M.uid=1,M.prototype={register:function(a,b){var c=b||{};return a.nodeType?a[this.expando]=c:Object.defineProperty(a,this.expando,{value:c,writable:!0,configurable:!0}),a[this.expando]},cache:function(a){if(!L(a))return{};var b=a[this.expando];return b||(b={},L(a)&&(a.nodeType?a[this.expando]=b:Object.defineProperty(a,this.expando,{value:b,configurable:!0}))),b},set:function(a,b,c){var d,e=this.cache(a);if("string"==typeof b)e[b]=c;else for(d in b)e[d]=b[d];return e},get:function(a,b){return void 0===b?this.cache(a):a[this.expando]&&a[this.expando][b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=a[this.expando];if(void 0!==f){if(void 
0===b)this.register(a);else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in f?d=[b,e]:(d=e,d=d in f?[d]:d.match(G)||[])),c=d.length;while(c--)delete f[d[c]]}(void 0===b||n.isEmptyObject(f))&&(a.nodeType?a[this.expando]=void 0:delete a[this.expando])}},hasData:function(a){var b=a[this.expando];return void 0!==b&&!n.isEmptyObject(b)}};var N=new M,O=new M,P=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Q=/[A-Z]/g;function R(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(Q,"-$&").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:P.test(c)?n.parseJSON(c):c; -}catch(e){}O.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return O.hasData(a)||N.hasData(a)},data:function(a,b,c){return O.access(a,b,c)},removeData:function(a,b){O.remove(a,b)},_data:function(a,b,c){return N.access(a,b,c)},_removeData:function(a,b){N.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=O.get(f),1===f.nodeType&&!N.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),R(f,d,e[d])));N.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){O.set(this,a)}):K(this,function(b){var c,d;if(f&&void 0===b){if(c=O.get(f,a)||O.get(f,a.replace(Q,"-$&").toLowerCase()),void 0!==c)return c;if(d=n.camelCase(a),c=O.get(f,d),void 0!==c)return c;if(c=R(f,d,void 0),void 0!==c)return c}else d=n.camelCase(a),this.each(function(){var c=O.get(this,d);O.set(this,d,b),a.indexOf("-")>-1&&void 0!==c&&O.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){O.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=N.get(a,b),c&&(!d||n.isArray(c)?d=N.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var 
c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return N.get(a,c)||N.access(a,c,{empty:n.Callbacks("once memory").add(function(){N.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length",""],thead:[1,"","
      "],col:[2,"","
      "],tr:[2,"","
      "],td:[3,"","
      "],_default:[0,"",""]};$.optgroup=$.option,$.tbody=$.tfoot=$.colgroup=$.caption=$.thead,$.th=$.td;function _(a,b){var c="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function aa(a,b){for(var c=0,d=a.length;d>c;c++)N.set(a[c],"globalEval",!b||N.get(b[c],"globalEval"))}var ba=/<|&#?\w+;/;function ca(a,b,c,d,e){for(var f,g,h,i,j,k,l=b.createDocumentFragment(),m=[],o=0,p=a.length;p>o;o++)if(f=a[o],f||0===f)if("object"===n.type(f))n.merge(m,f.nodeType?[f]:f);else if(ba.test(f)){g=g||l.appendChild(b.createElement("div")),h=(Y.exec(f)||["",""])[1].toLowerCase(),i=$[h]||$._default,g.innerHTML=i[1]+n.htmlPrefilter(f)+i[2],k=i[0];while(k--)g=g.lastChild;n.merge(m,g.childNodes),g=l.firstChild,g.textContent=""}else m.push(b.createTextNode(f));l.textContent="",o=0;while(f=m[o++])if(d&&n.inArray(f,d)>-1)e&&e.push(f);else if(j=n.contains(f.ownerDocument,f),g=_(l.appendChild(f),"script"),j&&aa(g),c){k=0;while(f=g[k++])Z.test(f.type||"")&&c.push(f)}return l}!function(){var a=d.createDocumentFragment(),b=a.appendChild(d.createElement("div")),c=d.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var da=/^key/,ea=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,fa=/^([^.]*)(?:\.(.+)|)/;function ga(){return!0}function ha(){return!1}function ia(){try{return d.activeElement}catch(a){}}function ja(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)ja(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=ha;else if(!e)return a;return 1===f&&(g=e,e=function(a){return 
n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return"undefined"!=typeof n&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(G)||[""],j=b.length;while(j--)h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.hasData(a)&&N.get(a);if(r&&(i=r.events)){b=(b||"").match(G)||[""],j=b.length;while(j--)if(h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&N.remove(a,"handle events")}},dispatch:function(a){a=n.event.fix(a);var 
b,c,d,f,g,h=[],i=e.call(arguments),j=(N.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!==this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h]*)\/>/gi,la=/\s*$/g;function pa(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function qa(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function ra(a){var b=na.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function sa(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(N.hasData(a)&&(f=N.access(a),g=N.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}O.hasData(a)&&(h=O.access(a),i=n.extend({},h),O.set(b,i))}}function ta(a,b){var c=b.nodeName.toLowerCase();"input"===c&&X.test(a.type)?b.checked=a.checked:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}function ua(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof 
q&&!l.checkClone&&ma.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),ua(f,b,c,d)});if(o&&(e=ca(b,a[0].ownerDocument,!1,a,d),g=e.firstChild,1===e.childNodes.length&&(e=g),g||d)){for(h=n.map(_(e,"script"),qa),i=h.length;o>m;m++)j=e,m!==p&&(j=n.clone(j,!0,!0),i&&n.merge(h,_(j,"script"))),c.call(a[m],j,m);if(i)for(k=h[h.length-1].ownerDocument,n.map(h,ra),m=0;i>m;m++)j=h[m],Z.test(j.type||"")&&!N.access(j,"globalEval")&&n.contains(k,j)&&(j.src?n._evalUrl&&n._evalUrl(j.src):n.globalEval(j.textContent.replace(oa,"")))}return a}function va(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(_(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&aa(_(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(ka,"<$1>")},clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=_(h),f=_(a),d=0,e=f.length;e>d;d++)ta(f[d],g[d]);if(b)if(c)for(f=f||_(a),g=g||_(h),d=0,e=f.length;e>d;d++)sa(f[d],g[d]);else sa(a,h);return g=_(h,"script"),g.length>0&&aa(g,!i&&_(a,"script")),h},cleanData:function(a){for(var b,c,d,e=n.event.special,f=0;void 0!==(c=a[f]);f++)if(L(c)){if(b=c[N.expando]){if(b.events)for(d in b.events)e[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);c[N.expando]=void 0}c[O.expando]&&(c[O.expando]=void 0)}}}),n.fn.extend({domManip:ua,detach:function(a){return va(this,a,!0)},remove:function(a){return va(this,a)},text:function(a){return K(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.appendChild(a)}})},prepend:function(){return 
ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(_(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return K(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!la.test(a)&&!$[(Y.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(_(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return ua(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(_(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),f=e.length-1,h=0;f>=h;h++)c=h===f?this:this.clone(!0),n(e[h])[b](c),g.apply(d,c.get());return this.pushStack(d)}});var wa,xa={HTML:"block",BODY:"block"};function ya(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function za(a){var b=d,c=xa[a];return c||(c=ya(a,b),"none"!==c&&c||(wa=(wa||n("