source: main/waeup.kofa/trunk/src/waeup/kofa/documents/batching.py @ 17808

Last change on this file since 17808 was 17787, checked in by Henrik Bettermann, 6 months ago

Add SessionConfigurationProcessor.
Add ConfigurationContainerProcessor.
Add ConfigurationContainerExporter.

  • Property svn:keywords set to Id
File size: 6.6 KB
RevLine 
[12438]1## $Id: batching.py 17787 2024-05-15 06:42:58Z henrik $
[12437]2##
3## Copyright (C) 2014 Uli Fouquet & Henrik Bettermann
4## This program is free software; you can redistribute it and/or modify
5## it under the terms of the GNU General Public License as published by
6## the Free Software Foundation; either version 2 of the License, or
7## (at your option) any later version.
8##
9## This program is distributed in the hope that it will be useful,
10## but WITHOUT ANY WARRANTY; without even the implied warranty of
11## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12## GNU General Public License for more details.
13##
14## You should have received a copy of the GNU General Public License
15## along with this program; if not, write to the Free Software
16## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17##
18"""Batch processing components for document objects.
19
20Batch processors eat CSV files to add, update or remove large numbers
21of certain kinds of objects at once.
22"""
23import grok
24import unicodecsv as csv  # XXX: csv ops should move to dedicated module.
25from time import time
[17763]26from ast import literal_eval
[12437]27from datetime import datetime
28from zope.i18n import translate
29from zope.interface import Interface
30from zope.schema import getFields
31from zope.component import queryUtility, getUtility, createObject
32from zope.event import notify
33from zope.catalog.interfaces import ICatalog
34from hurry.workflow.interfaces import IWorkflowState, IWorkflowInfo
[17763]35from waeup.kofa.interfaces import (IObjectHistory,
[12437]36    IBatchProcessor, FatalCSVError, IObjectConverter, IUserAccount,
[13138]37    IGNORE_MARKER)
[12437]38from waeup.kofa.interfaces import IKofaUtils
39from waeup.kofa.interfaces import MessageFactory as _
40from waeup.kofa.documents.interfaces import (
41    IPDFDocument, IHTMLDocument, IRESTDocument)
42from waeup.kofa.utils.batching import BatchProcessor
43
44
class DocumentProcessorBase(BatchProcessor):
    """This is the base class for all kinds of document processors.

    The `checkConversion` method checks whether `class_name` in a row
    corresponds with the processor chosen. This is to avoid accidentally
    wrong imports.

    Document processors do not import workflow states or transitions which
    means, all imported documents will be unpublished after batch creation.
    In other words, publishing can't be done by import, it has to be done
    via the UI.
    """
    grok.implements(IBatchProcessor)
    grok.provides(IBatchProcessor)
    grok.context(Interface)
    grok.baseclass()

    # Subclasses must override these registration attributes.
    util_name = None
    name = None
    iface = None

    # Fields used to locate an existing object for update/remove.
    location_fields = ['document_id',]
    # Importable columns that are not part of the schema interface.
    additional_fields = ['class_name', 'state', 'history']

    # Dotted factory name, e.g. 'waeup.PDFDocument'; its last segment
    # must match the 'class_name' column on 'create' imports.
    factory_name = None

    @property
    def available_fields(self):
        """Sorted union of schema fields and additional import fields."""
        return sorted(list(set(
                    self.additional_fields +
                    getFields(self.iface).keys())))

    def parentsExist(self, row, site):
        """Documents live in the site-wide 'documents' container."""
        return 'documents' in site.keys()

    def entryExists(self, row, site):
        """Tell whether a document with the row's `document_id` exists.

        Looks the id up in the documents catalog. `queryUtility` returns
        None instead of raising when the catalog is unavailable; treat
        that case as 'no entry'.
        """
        document_id = row.get('document_id', None)
        cat = queryUtility(ICatalog, name='documents_catalog')
        if cat is None:
            return False
        results = list(cat.searchResults(document_id=(document_id, document_id)))
        return bool(results)

    def getParent(self, row, site):
        """Return the container all documents are stored in."""
        return site['documents']

    def getEntry(self, row, site):
        """Return the document addressed by the row, or None."""
        if not self.entryExists(row, site):
            return None
        parent = self.getParent(row, site)
        return parent.get(row['document_id'])

    def addEntry(self, obj, row, site):
        """Add the new document object to the documents container."""
        parent = self.getParent(row, site)
        parent.addDocument(obj)
        return

    def delEntry(self, row, site):
        """Remove the document addressed by the row, logging the removal."""
        document = self.getEntry(row, site)
        parent = self.getParent(row, site)
        if document is not None:
            grok.getSite().logger.info(
                '%s - Document removed' % document.document_id)
            del parent[document.document_id]
        return

    def updateEntry(self, obj, row, site, filename):
        """Update obj to the values given in row.

        Beyond the schema fields handled by the base class, this also
        replaces the entire workflow history and sets the workflow state
        if the respective columns are present, and logs all changes.
        """
        items_changed = super(DocumentProcessorBase, self).updateEntry(
            obj, row, site, filename)
        # Replace entire history
        if 'history' in row:
            new_history = row.get('history', IGNORE_MARKER)
            if new_history not in (IGNORE_MARKER, ''):
                history = IObjectHistory(obj)
                # literal_eval accepts only Python literals, so a
                # malformed history cell raises instead of executing code.
                history._annotations[
                    history.history_key] = literal_eval(new_history)
                items_changed += ('%s=%s, ' % ('history', new_history))
            row.pop('history')
        # Update state
        if 'state' in row:
            state = row.get('state', IGNORE_MARKER)
            if state not in (IGNORE_MARKER, ''):
                IWorkflowState(obj).setState(state)
                msg = _("State '${a}' set", mapping = {'a':state})
                history = IObjectHistory(obj)
                history.addMessage(msg)
                items_changed += ('%s=%s, ' % ('state', state))
            row.pop('state')
        # Log actions...
        location_field = self.location_fields[0]
        grok.getSite().logger.info(
            '%s - %s - %s - updated: %s'
            % (self.name, filename, row[location_field], items_changed))
        return

    def checkConversion(self, row, mode='ignore'):
        """Validates all values in row.
        """
        errs, inv_errs, conv_dict = super(
            DocumentProcessorBase, self).checkConversion(row, mode=mode)
        # We need to check if the class_name corresponds with the
        # processor chosen. This is to avoid accidentally wrong imports.
        if mode == 'create':
            class_name = row.get('class_name', None)
            # Compare against the local part of the dotted factory name.
            # The former strip('waeup.') removed a *character set* from
            # both ends, not the prefix, and only worked by accident for
            # the current factory names.
            if class_name != self.factory_name.split('.')[-1]:
                errs.append(('class_name','wrong processor'))
        return errs, inv_errs, conv_dict
153
154
class PDFDocumentProcessor(DocumentProcessorBase):
    """A batch processor for IPDFDocument objects.
    """
    # Utility name under which this processor is registered.
    util_name = 'pdfdocumentprocessor'
    grok.name(util_name)

    # Human-readable title shown in the import user interface.
    name = _('Public PDF Document Processor')
    # Schema interface whose fields become importable columns.
    iface = IPDFDocument

    # Factory used to create new objects; its last dotted segment must
    # match the 'class_name' column on 'create' imports.
    factory_name = 'waeup.PDFDocument'
165
166
class HTMLDocumentProcessor(DocumentProcessorBase):
    """A batch processor for IHTMLDocument objects.
    """
    # Utility name under which this processor is registered.
    util_name = 'htmldocumentprocessor'
    grok.name(util_name)

    # Human-readable title shown in the import user interface.
    name = _('Public HTML Document Processor')
    # Schema interface whose fields become importable columns.
    iface = IHTMLDocument

    # Factory used to create new objects; its last dotted segment must
    # match the 'class_name' column on 'create' imports.
    factory_name = 'waeup.HTMLDocument'
177
178
class RESTDocumentProcessor(DocumentProcessorBase):
    """A batch processor for IRESTDocument objects.
    """
    # Utility name under which this processor is registered.
    util_name = 'restdocumentprocessor'
    grok.name(util_name)

    # Human-readable title shown in the import user interface.
    name = _('Public REST Document Processor')
    # Schema interface whose fields become importable columns.
    iface = IRESTDocument

    # Factory used to create new objects; its last dotted segment must
    # match the 'class_name' column on 'create' imports.
    factory_name = 'waeup.RESTDocument'
Note: See TracBrowser for help on using the repository browser.