1 """This module encapsulates a document stored in a GNUmed database."""
2
3 __author__ = "Karsten Hilbert <Karsten.Hilbert@gmx.net>"
4 __license__ = "GPL v2 or later"
5
6 import sys, os, shutil, os.path, types, time, logging
7
8
9 if __name__ == '__main__':
10 sys.path.insert(0, '../../')
11 from Gnumed.pycommon import gmExceptions
12 from Gnumed.pycommon import gmBusinessDBObject
13 from Gnumed.pycommon import gmPG2
14 from Gnumed.pycommon import gmTools
15 from Gnumed.pycommon import gmMimeLib
16 from Gnumed.pycommon import gmDateTime
17
18 from Gnumed.business import gmOrganization
19
20
21 _log = logging.getLogger('gm.docs')
22
23 MUGSHOT=26
24 DOCUMENT_TYPE_VISUAL_PROGRESS_NOTE = 'visual progress note'
25 DOCUMENT_TYPE_PRESCRIPTION = 'prescription'
26
27
28 class cDocumentFolder:
29 """Represents a folder with medical documents for a single patient."""
30
31 def __init__(self, aPKey=None):
32 """Fails if
33
34 - patient referenced by aPKey does not exist
35 """
36 self.pk_patient = aPKey
37 if not self._pkey_exists():
38 raise gmExceptions.ConstructorError("No patient with PK [%s] in database." % aPKey)
39
40
41
42
43
44
45
46 _log.debug('instantiated document folder for patient [%s]' % self.pk_patient)
47
50
51
52
53 def _pkey_exists(self):
54 """Does this primary key exist ?
55
56 - true/false/None
57 """
58
59 rows, idx = gmPG2.run_ro_queries(queries = [
60 {'cmd': "select exists(select pk from dem.identity where pk = %s)", 'args': [self.pk_patient]}
61 ])
62 if not rows[0][0]:
63 _log.error("patient [%s] not in demographic database" % self.pk_patient)
64 return None
65 return True
66
67
68
69 def get_latest_freediams_prescription(self):
70 cmd = """
71 SELECT pk_doc
72 FROM blobs.v_doc_med
73 WHERE
74 pk_patient = %(pat)s
75 AND
76 type = %(typ)s
77 AND
78 ext_ref = %(ref)s
79 ORDER BY
80 clin_when DESC
81 LIMIT 1
82 """
83 args = {
84 'pat': self.pk_patient,
85 'typ': DOCUMENT_TYPE_PRESCRIPTION,
86 'ref': 'FreeDiams'
87 }
88 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
89 if len(rows) == 0:
90 _log.info('no FreeDiams prescription available for patient [%s]' % self.pk_patient)
91 return None
92 prescription = cDocument(aPK_obj = rows[0][0])
93 return prescription
94
95
96 def get_latest_mugshot(self):
97 cmd = "SELECT pk_obj FROM blobs.v_latest_mugshot WHERE pk_patient = %s"
98 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
99 if len(rows) == 0:
100 _log.info('no mugshots available for patient [%s]' % self.pk_patient)
101 return None
102 return cDocumentPart(aPK_obj = rows[0][0])
103
104 latest_mugshot = property(get_latest_mugshot, lambda x:x)
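# Usage sketch (assumes a bootstrapped gmPG2 connection and an existing patient PK):
#
#   folder = cDocumentFolder(aPKey = 12)
#   mugshot = folder.latest_mugshot            # cDocumentPart instance, or None
#   if mugshot is not None:
#       exported = mugshot.save_to_file(directory = '/tmp')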
105
106
107 def get_mugshot_list(self, latest_only=True):
108 if latest_only:
109 cmd = "select pk_doc, pk_obj from blobs.v_latest_mugshot where pk_patient=%s"
110 else:
111 cmd = """
112 select
113 vdm.pk_doc as pk_doc,
114 dobj.pk as pk_obj
115 from
116 blobs.v_doc_med vdm,
117 blobs.doc_obj dobj
118 where
119 vdm.pk_type = (select pk from blobs.doc_type where name = 'patient photograph')
120 and vdm.pk_patient = %s
121 and dobj.fk_doc = vdm.pk_doc
122 """
123 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
124 return rows
125
126
127 def get_doc_list(self, doc_type=None):
128 """return flat list of document IDs"""
129
130 args = {
131 'ID': self.pk_patient,
132 'TYP': doc_type
133 }
134
135 cmd = """
136 select vdm.pk_doc
137 from blobs.v_doc_med vdm
138 where
139 vdm.pk_patient = %%(ID)s
140 %s
141 order by vdm.clin_when"""
142
143 if doc_type is None:
144 cmd = cmd % ''
145 else:
146 try:
147 int(doc_type)
148 cmd = cmd % 'and vdm.pk_type = %(TYP)s'
149 except (TypeError, ValueError):
150 cmd = cmd % 'and vdm.pk_type = (select pk from blobs.doc_type where name = %(TYP)s)'
151
152 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
153 doc_ids = []
154 for row in rows:
155 doc_ids.append(row[0])
156 return doc_ids
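# Usage sketch: doc_type may be a blobs.doc_type primary key or a type name.
#
#   folder = cDocumentFolder(aPKey = 12)       # assumes this patient PK exists
#   all_pks = folder.get_doc_list()
#   rx_pks = folder.get_doc_list(doc_type = DOCUMENT_TYPE_PRESCRIPTION)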
157
158
165
166
167 def get_unsigned_documents(self):
168 args = {'pat': self.pk_patient}
169 cmd = _SQL_get_document_fields % """
170 pk_doc IN (
171 SELECT DISTINCT ON (b_vo.pk_doc) b_vo.pk_doc
172 FROM blobs.v_obj4doc_no_data b_vo
173 WHERE
174 pk_patient = %(pat)s
175 AND
176 reviewed IS FALSE
177 )
178 ORDER BY clin_when DESC"""
179 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
180 return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]
181
182
183 def get_documents(self, doc_type=None, pk_episodes=None, encounter=None, order_by=None, exclude_unsigned=False, pk_types=None):
184 """Return list of documents."""
185
186 args = {
187 'pat': self.pk_patient,
188 'type': doc_type,
189 'enc': encounter
190 }
191 where_parts = ['pk_patient = %(pat)s']
192
193 if doc_type is not None:
194 try:
195 int(doc_type)
196 where_parts.append('pk_type = %(type)s')
197 except (TypeError, ValueError):
198 where_parts.append('pk_type = (SELECT pk FROM blobs.doc_type WHERE name = %(type)s)')
199
200 if pk_types is not None:
201 where_parts.append('pk_type IN %(pk_types)s')
202 args['pk_types'] = tuple(pk_types)
203
204 if (pk_episodes is not None) and (len(pk_episodes) > 0):
205 where_parts.append('pk_episode IN %(epis)s')
206 args['epis'] = tuple(pk_episodes)
207
208 if encounter is not None:
209 where_parts.append('pk_encounter = %(enc)s')
210
211 if exclude_unsigned:
212 where_parts.append('pk_doc IN (SELECT b_vo.pk_doc FROM blobs.v_obj4doc_no_data b_vo WHERE b_vo.pk_patient = %(pat)s AND b_vo.reviewed IS TRUE)')
213
214 if order_by is None:
215 order_by = 'ORDER BY clin_when'
216
217 cmd = "%s\n%s" % (_SQL_get_document_fields % ' AND '.join(where_parts), order_by)
218 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
219
220 return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]
221
222 documents = property(get_documents, lambda x:x)
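# Usage sketch: get_documents() filters by type (name or PK), episodes,
# encounter, and review state, and accepts a full ORDER BY clause.
#
#   folder = cDocumentFolder(aPKey = 12)       # assumes this patient PK exists
#   recent = folder.get_documents(
#       doc_type = DOCUMENT_TYPE_PRESCRIPTION,
#       order_by = 'ORDER BY clin_when DESC'
#   )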
223
224
225 def add_document(self, document_type=None, encounter=None, episode=None, link_obj=None):
226 return create_document(link_obj = link_obj, document_type = document_type, encounter = encounter, episode = episode)
227
228
238
239
240 def _get_all_document_org_units(self):
241 cmd = gmOrganization._SQL_get_org_unit % (
242 'pk_org_unit IN (SELECT DISTINCT ON (pk_org_unit) pk_org_unit FROM blobs.v_doc_med WHERE pk_patient = %(pat)s)'
243 )
244 args = {'pat': self.pk_patient}
245 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
246 return [ gmOrganization.cOrgUnit(row = {'data': r, 'idx': idx, 'pk_field': 'pk_org_unit'}) for r in rows ]
247
248 all_document_org_units = property(_get_all_document_org_units, lambda x:x)
249
250
251 _SQL_get_document_part_fields = "select * from blobs.v_obj4doc_no_data where %s"
252
253 class cDocumentPart(gmBusinessDBObject.cBusinessDBObject):
254 """Represents one part of a medical document."""
255
256 _cmd_fetch_payload = _SQL_get_document_part_fields % "pk_obj = %s"
257 _cmds_store_payload = [
258 """UPDATE blobs.doc_obj SET
259 seq_idx = %(seq_idx)s,
260 comment = gm.nullify_empty_string(%(obj_comment)s),
261 filename = gm.nullify_empty_string(%(filename)s),
262 fk_intended_reviewer = %(pk_intended_reviewer)s
263 WHERE
264 pk = %(pk_obj)s
265 AND
266 xmin = %(xmin_doc_obj)s
267 RETURNING
268 xmin AS xmin_doc_obj"""
269 ]
270 _updatable_fields = [
271 'seq_idx',
272 'obj_comment',
273 'pk_intended_reviewer',
274 'filename'
275 ]
276
277
278
279 def save_to_file(self, aChunkSize=0, filename=None, target_mime=None, target_extension=None, ignore_conversion_problems=False, directory=None, adjust_extension=False, conn=None):
280
281 if self._payload[self._idx['size']] == 0:
282 return None
283
284 if filename is None:
285 filename = self.get_useful_filename(make_unique = True, directory = directory)
286
287 success = gmPG2.bytea2file (
288 data_query = {
289 'cmd': 'SELECT substring(data from %(start)s for %(size)s) FROM blobs.doc_obj WHERE pk=%(pk)s',
290 'args': {'pk': self.pk_obj}
291 },
292 filename = filename,
293 chunk_size = aChunkSize,
294 data_size = self._payload[self._idx['size']],
295 conn = conn
296 )
297 if not success:
298 return None
299
300 if target_mime is None:
301 if filename.endswith('.dat'):
302 if adjust_extension:
303 return gmMimeLib.adjust_extension_by_mimetype(filename)
304 return filename
305
306 if target_extension is None:
307 target_extension = gmMimeLib.guess_ext_by_mimetype(mimetype = target_mime)
308
309 target_path, name = os.path.split(filename)
310 name, tmp = os.path.splitext(name)
311 target_fname = gmTools.get_unique_filename (
312 prefix = '%s-conv-' % name,
313 suffix = target_extension
314 )
315 _log.debug('attempting conversion: [%s] -> [<%s>:%s]', filename, target_mime, target_fname)
316 if gmMimeLib.convert_file (
317 filename = filename,
318 target_mime = target_mime,
319 target_filename = target_fname
320 ):
321 return target_fname
322
323 _log.warning('conversion failed')
324 if not ignore_conversion_problems:
325 return None
326
327 if filename.endswith('.dat'):
328 if adjust_extension:
329 filename = gmMimeLib.adjust_extension_by_mimetype(filename)
330 _log.warning('programmed to ignore conversion problems, hoping receiver can handle [%s]', filename)
331 return filename
332
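# Usage sketch (assumes an existing document part PK): export the part and,
# if a target MIME type is given, try converting it via gmMimeLib.
#
#   part = cDocumentPart(aPK_obj = 1)
#   pdf_name = part.save_to_file(target_mime = 'application/pdf', adjust_extension = True)
#   if pdf_name is None:
#       pass        # export or conversion failed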
333
334 def get_reviews(self):
335 cmd = """
336 SELECT
337 reviewer,
338 reviewed_when,
339 is_technically_abnormal,
340 clinically_relevant,
341 is_review_by_responsible_reviewer,
342 is_your_review,
343 coalesce(comment, '')
344 FROM blobs.v_reviewed_doc_objects
345 WHERE pk_doc_obj = %s
346 ORDER BY
347 is_your_review desc,
348 is_review_by_responsible_reviewer desc,
349 reviewed_when desc
350 """
351 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
352 return rows
353
354
355 def get_containing_document(self):
356 return cDocument(aPK_obj = self._payload[self._idx['pk_doc']])
357
358
359
360
361 def update_data_from_file(self, fname=None, link_obj=None):
362
363 if not (os.access(fname, os.R_OK) and os.path.isfile(fname)):
364 _log.error('[%s] is not a readable file' % fname)
365 return False
366
367 if not gmPG2.file2bytea (
368 conn = link_obj,
369 query = "UPDATE blobs.doc_obj SET data = %(data)s::bytea WHERE pk = %(pk)s RETURNING md5(data) AS md5",
370 filename = fname,
371 args = {'pk': self.pk_obj},
372 file_md5 = gmTools.file2md5(filename = fname, return_hex = True)
373 ):
374 return False
375
376
377 self.refetch_payload(link_obj = link_obj)
378 return True
379
380
381 def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
382
383 cmd = """
384 select pk
385 from blobs.reviewed_doc_objs
386 where
387 fk_reviewed_row = %s and
388 fk_reviewer = (select pk from dem.staff where db_user = current_user)"""
389 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
390
391
392 if len(rows) == 0:
393 cols = [
394 "fk_reviewer",
395 "fk_reviewed_row",
396 "is_technically_abnormal",
397 "clinically_relevant"
398 ]
399 vals = [
400 '%(fk_row)s',
401 '%(abnormal)s',
402 '%(relevant)s'
403 ]
404 args = {
405 'fk_row': self.pk_obj,
406 'abnormal': technically_abnormal,
407 'relevant': clinically_relevant
408 }
409 cmd = """
410 insert into blobs.reviewed_doc_objs (
411 %s
412 ) values (
413 (select pk from dem.staff where db_user=current_user),
414 %s
415 )""" % (', '.join(cols), ', '.join(vals))
416
417
418 if len(rows) == 1:
419 pk_review = rows[0][0]
420 args = {
421 'abnormal': technically_abnormal,
422 'relevant': clinically_relevant,
423 'pk_review': pk_review
424 }
425 cmd = """
426 UPDATE blobs.reviewed_doc_objs SET
427 is_technically_abnormal = %(abnormal)s,
428 clinically_relevant = %(relevant)s
429 WHERE
430 pk = %(pk_review)s
431 """
432 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
433
434 return True
435
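# Usage sketch (continuing with the part from the sketch above): record the
# current user's review; an existing review row is updated, otherwise inserted.
#
#   part.set_reviewed(technically_abnormal = False, clinically_relevant = True)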
436
437 def set_as_active_photograph(self):
438 if self._payload[self._idx['type']] != 'patient photograph':
439 return False
440
441 cmd = 'SELECT coalesce(max(seq_idx)+1, 1) FROM blobs.doc_obj WHERE fk_doc = %(doc_id)s'
442 rows, idx = gmPG2.run_ro_queries (
443 queries = [{
444 'cmd': cmd,
445 'args': {'doc_id': self._payload[self._idx['pk_doc']]}
446 }]
447 )
448 self._payload[self._idx['seq_idx']] = rows[0][0]
449 self._is_modified = True
450 self.save_payload()
451
452
453 def reattach(self, pk_doc=None):
454 if pk_doc == self._payload[self._idx['pk_doc']]:
455 return True
456
457 cmd = """
458 UPDATE blobs.doc_obj SET
459 fk_doc = %(pk_doc_target)s,
460 -- coalesce needed for no-parts target docs
461 seq_idx = (SELECT coalesce(max(seq_idx) + 1, 1) FROM blobs.doc_obj WHERE fk_doc = %(pk_doc_target)s)
462 WHERE
463 EXISTS(SELECT 1 FROM blobs.doc_med WHERE pk = %(pk_doc_target)s)
464 AND
465 pk = %(pk_obj)s
466 AND
467 xmin = %(xmin_doc_obj)s
468 RETURNING fk_doc
469 """
470 args = {
471 'pk_doc_target': pk_doc,
472 'pk_obj': self.pk_obj,
473 'xmin_doc_obj': self._payload[self._idx['xmin_doc_obj']]
474 }
475 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True, get_col_idx = False)
476 if len(rows) == 0:
477 return False
478
479
480
481
482
483
484 if rows[0]['fk_doc'] == self._payload[self._idx['pk_doc']]:
485 return False
486
487 self.refetch_payload()
488 return True
489
490 def display_via_mime(self, chunksize=0, block=None):
491
492 fname = self.save_to_file(aChunkSize = chunksize)
493 if fname is None:
494 return False, ''
495
496 success, msg = gmMimeLib.call_viewer_on_file(fname, block = block)
497 if not success:
498 return False, msg
499
500 return True, ''
501
502
519
520
559
560
561 def get_useful_filename(self, patient=None, make_unique=False, directory=None, include_gnumed_tag=True, date_before_type=False, name_first=True):
562 patient_part = ''
563 if patient is not None:
564 if name_first:
565 patient_part = '%s-' % patient.subdir_name
566 else:
567 patient_part = '-%s' % patient.subdir_name
568
569
570 suffix = '.dat'
571 if self._payload[self._idx['filename']] is not None:
572 tmp, suffix = os.path.splitext (
573 gmTools.fname_sanitize(self._payload[self._idx['filename']]).lower()
574 )
575 if suffix == '':
576 suffix = '.dat'
577
578 if include_gnumed_tag:
579 fname_template = 'gm_doc-part_%s-%%s' % self._payload[self._idx['seq_idx']]
580 else:
581 fname_template = '%%s-part_%s' % self._payload[self._idx['seq_idx']]
582
583 if date_before_type:
584 date_type_part = '%s-%s' % (
585 gmDateTime.pydt_strftime(self._payload[self._idx['date_generated']], '%Y-%m-%d', 'utf-8', gmDateTime.acc_days),
586 self._payload[self._idx['l10n_type']].replace(' ', '_').replace('-', '_'),
587 )
588 else:
589 date_type_part = '%s-%s' % (
590 self._payload[self._idx['l10n_type']].replace(' ', '_').replace('-', '_'),
591 gmDateTime.pydt_strftime(self._payload[self._idx['date_generated']], '%Y-%m-%d', 'utf-8', gmDateTime.acc_days)
592 )
593
594 if name_first:
595 date_type_name_part = patient_part + date_type_part
596 else:
597 date_type_name_part = date_type_part + patient_part
598
599 fname = fname_template % date_type_name_part
600
601 if make_unique:
602 fname = gmTools.get_unique_filename (
603 prefix = '%s-' % gmTools.fname_sanitize(fname),
604 suffix = suffix,
605 tmp_dir = directory
606 )
607 else:
608 fname = gmTools.fname_sanitize(os.path.join(gmTools.coalesce(directory, ''), fname + suffix))
609
610 return fname
611
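# Sketch of the generated name (with the include_gnumed_tag/name_first defaults),
# roughly 'gm_doc-part_<seq_idx>-<type>-<YYYY-MM-DD>[-<unique>]<.ext>', e.g.:
#
#   part.get_useful_filename(make_unique = True, directory = '/tmp')
#   # -> something like '/tmp/gm_doc-part_2-discharge_summary-2014-03-01-xyz.pdf'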
612
613 def delete_document_part(part_pk=None, encounter_pk=None):
614 cmd = """
615 SELECT blobs.delete_document_part(%(pk)s, %(enc)s)
616 WHERE NOT EXISTS
617 (SELECT 1 FROM clin.export_item where fk_doc_obj = %(pk)s)
618 """
619 args = {'pk': part_pk, 'enc': encounter_pk}
620 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
621 return
622
623
624 _SQL_get_document_fields = "SELECT * FROM blobs.v_doc_med b_vdm WHERE %s"
625
626 class cDocument(gmBusinessDBObject.cBusinessDBObject):
627 """Represents one medical document."""
628
629 _cmd_fetch_payload = _SQL_get_document_fields % "pk_doc = %s"
630 _cmds_store_payload = [
631 """UPDATE blobs.doc_med SET
632 fk_type = %(pk_type)s,
633 fk_episode = %(pk_episode)s,
634 fk_encounter = %(pk_encounter)s,
635 fk_org_unit = %(pk_org_unit)s,
636 unit_is_receiver = %(unit_is_receiver)s,
637 clin_when = %(clin_when)s,
638 comment = gm.nullify_empty_string(%(comment)s),
639 ext_ref = gm.nullify_empty_string(%(ext_ref)s),
640 fk_hospital_stay = %(pk_hospital_stay)s
641 WHERE
642 pk = %(pk_doc)s and
643 xmin = %(xmin_doc_med)s
644 RETURNING
645 xmin AS xmin_doc_med"""
646 ]
647 _updatable_fields = [
648 'pk_type',
649 'comment',
650 'clin_when',
651 'ext_ref',
652 'pk_episode',
653 'pk_encounter',
654 'pk_org_unit',
655 'unit_is_receiver',
656 'pk_hospital_stay'
657 ]
658
659
660 def refetch_payload(self, ignore_changes=False, link_obj=None):
661 try: del self.__has_unreviewed_parts
662 except AttributeError: pass
663
664 return super(cDocument, self).refetch_payload(ignore_changes = ignore_changes, link_obj = link_obj)
665
666
667 def get_descriptions(self, max_lng=250):
668 """Get document descriptions.
669
670 - will return a list of rows
671 """
672 if max_lng is None:
673 cmd = "SELECT pk, text FROM blobs.doc_desc WHERE fk_doc = %s"
674 else:
675 cmd = "SELECT pk, substring(text from 1 for %s) FROM blobs.doc_desc WHERE fk_doc=%%s" % max_lng
676 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
677 return rows
678
679
684
685
686 def update_description(self, pk=None, description=None):
687 cmd = "update blobs.doc_desc set text = %(desc)s where fk_doc = %(doc)s and pk = %(pk_desc)s"
688 gmPG2.run_rw_queries(queries = [
689 {'cmd': cmd, 'args': {'doc': self.pk_obj, 'pk_desc': pk, 'desc': description}}
690 ])
691 return True
692
693
694 def delete_description(self, pk=None):
695 cmd = "delete from blobs.doc_desc where fk_doc = %(doc)s and pk = %(desc)s"
696 gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': {'doc': self.pk_obj, 'desc': pk}}])
697 return True
698
699
704
705 parts = property(_get_parts, lambda x:x)
706
707
708 def add_part(self, file=None, link_obj=None):
709 """Add a part to the document."""
710
711 cmd = """
712 INSERT INTO blobs.doc_obj (
713 fk_doc, data, seq_idx
714 ) VALUES (
715 %(doc_id)s,
716 ''::bytea,
717 (SELECT coalesce(max(seq_idx)+1, 1) FROM blobs.doc_obj WHERE fk_doc = %(doc_id)s)
718 ) RETURNING pk"""
719 rows, idx = gmPG2.run_rw_queries (
720 link_obj = link_obj,
721 queries = [{'cmd': cmd, 'args': {'doc_id': self.pk_obj}}],
722 return_data = True
723 )
724
725 pk_part = rows[0][0]
726 new_part = cDocumentPart(aPK_obj = pk_part, link_obj = link_obj)
727 if not new_part.update_data_from_file(link_obj = link_obj, fname = file):
728 _log.error('cannot import binary data from [%s] into document part' % file)
729 gmPG2.run_rw_queries (
730 link_obj = link_obj,
731 queries = [{'cmd': "DELETE FROM blobs.doc_obj WHERE pk = %s", 'args': [pk_part]}]
732 )
733 return None
734 new_part['filename'] = file
735 new_part.save_payload(conn = link_obj)
736
737 return new_part
738
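# Usage sketch (doc being a cDocument instance; assumes readable files and,
# for reviewer, an existing dem.staff PK):
#
#   part = doc.add_part(file = '/tmp/scan-page-1.png')
#   ok, msg, parts = doc.add_parts_from_files(
#       files = ['/tmp/scan-page-1.png', '/tmp/scan-page-2.png'],
#       reviewer = pk_staff
#   )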
739
740 def add_parts_from_files(self, files=None, reviewer=None):
741
742 new_parts = []
743
744 for filename in files:
745 new_part = self.add_part(file = filename)
746 if new_part is None:
747 msg = 'cannot instantiate document part object from [%s]' % filename
748 _log.error(msg)
749 return (False, msg, filename)
750 new_parts.append(new_part)
751
752 if reviewer is not None:
753 new_part['pk_intended_reviewer'] = reviewer
754 success, data = new_part.save_payload()
755 if not success:
756 msg = 'cannot set reviewer to [%s] on [%s]' % (reviewer, filename)
757 _log.error(msg)
758 _log.error(str(data))
759 return (False, msg, filename)
760
761 return (True, '', new_parts)
762
763
764 def export_parts_to_files(self, export_dir=None, chunksize=0, conn=None):
765 fnames = []
766 for part in self.parts:
767 fname = part.save_to_file(aChunkSize = chunksize, directory = export_dir, conn = conn)
768
769
770
771 if fname is None:
772 _log.error('cannot export document part [%s]', part)
773 continue
774 fnames.append(fname)
775 return fnames
776
777
778 def _get_has_unreviewed_parts(self):
779 try:
780 return self.__has_unreviewed_parts
781 except AttributeError:
782 pass
783
784 cmd = "SELECT EXISTS(SELECT 1 FROM blobs.v_obj4doc_no_data WHERE pk_doc = %(pk)s AND reviewed IS FALSE)"
785 args = {'pk': self.pk_obj}
786 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
787 self.__has_unreviewed_parts = rows[0][0]
788
789 return self.__has_unreviewed_parts
790
791 has_unreviewed_parts = property(_get_has_unreviewed_parts, lambda x:x)
792
793
794 def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
795
796 for part in self.parts:
797 if not part.set_reviewed(technically_abnormal, clinically_relevant):
798 return False
799 return True
800
801
802 def set_primary_reviewer(self, reviewer=None):
803 for part in self.parts:
804 part['pk_intended_reviewer'] = reviewer
805 success, data = part.save_payload()
806 if not success:
807 _log.error('cannot set reviewer to [%s]' % reviewer)
808 _log.error(str(data))
809 return False
810 return True
811
812
848
849
905
906
912
913 hospital_stay = property(_get_hospital_stay, lambda x:x)
914
915
916 def _get_org_unit(self):
917 if self._payload[self._idx['pk_org_unit']] is None:
918 return None
919 return gmOrganization.cOrgUnit(self._payload[self._idx['pk_org_unit']])
920
921 org_unit = property(_get_org_unit, lambda x:x)
922
923
927
928 procedures = property(_get_procedures, lambda x:x)
929
930
934
935 bills = property(_get_bills, lambda x:x)
936
937
938 def create_document(document_type=None, encounter=None, episode=None, link_obj=None):
939 """Returns new document instance or raises an exception."""
940 try:
941 int(document_type)
942 cmd = """INSERT INTO blobs.doc_med (fk_type, fk_encounter, fk_episode) VALUES (%(type)s, %(enc)s, %(epi)s) RETURNING pk"""
943 except (TypeError, ValueError):
944 create_document_type(document_type = document_type)
945 cmd = """
946 INSERT INTO blobs.doc_med (
947 fk_type,
948 fk_encounter,
949 fk_episode
950 ) VALUES (
951 coalesce (
952 (SELECT pk from blobs.doc_type bdt where bdt.name = %(type)s),
953 (SELECT pk from blobs.doc_type bdt where _(bdt.name) = %(type)s)
954 ),
955 %(enc)s,
956 %(epi)s
957 ) RETURNING pk"""
958 args = {'type': document_type, 'enc': encounter, 'epi': episode}
959 rows, idx = gmPG2.run_rw_queries(link_obj = link_obj, queries = [{'cmd': cmd, 'args': args}], return_data = True)
960 doc = cDocument(aPK_obj = rows[0][0], link_obj = link_obj)
961 return doc
962
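# Usage sketch (assumes valid encounter and episode PKs): document_type may be
# a blobs.doc_type PK or a type name (created on the fly if unknown).
#
#   doc = create_document(
#       document_type = DOCUMENT_TYPE_PRESCRIPTION,
#       encounter = pk_encounter,
#       episode = pk_episode
#   )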
963
964 def search_for_documents(patient_id=None, type_id=None, external_reference=None, pk_episode=None, pk_types=None):
965 """Searches for documents with the given patient and type ID."""
966
967 if (patient_id is None) and (pk_episode is None):
968 raise ValueError('need patient_id or pk_episode to search for document')
969
970 where_parts = []
971 args = {
972 'pat_id': patient_id,
973 'type_id': type_id,
974 'ref': external_reference,
975 'pk_epi': pk_episode
976 }
977
978 if patient_id is not None:
979 where_parts.append('pk_patient = %(pat_id)s')
980
981 if type_id is not None:
982 where_parts.append('pk_type = %(type_id)s')
983
984 if external_reference is not None:
985 where_parts.append('ext_ref = %(ref)s')
986
987 if pk_episode is not None:
988 where_parts.append('pk_episode = %(pk_epi)s')
989
990 if pk_types is not None:
991 where_parts.append('pk_type IN %(pk_types)s')
992 args['pk_types'] = tuple(pk_types)
993
994 cmd = _SQL_get_document_fields % ' AND '.join(where_parts)
995 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
996 return [ cDocument(row = {'data': r, 'idx': idx, 'pk_field': 'pk_doc'}) for r in rows ]
997
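# Usage sketch: at least patient_id or pk_episode is required; the other
# criteria narrow the search.
#
#   docs = search_for_documents(patient_id = 12, external_reference = 'FreeDiams')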
998
999 def delete_document(document_id=None, encounter_id=None):
1000
1001 cmd = "SELECT blobs.delete_document(%(pk)s, %(enc)s)"
1002 args = {'pk': document_id, 'enc': encounter_id}
1003 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
1004 if not rows[0][0]:
1005 _log.error('cannot delete document [%s]', document_id)
1006 return False
1007 return True
1008
1009
1010 def reclassify_documents_by_type(original_type=None, target_type=None):
1011
1012 _log.debug('reclassifying documents by type')
1013 _log.debug('original: %s', original_type)
1014 _log.debug('target: %s', target_type)
1015
1016 if target_type['pk_doc_type'] == original_type['pk_doc_type']:
1017 return True
1018
1019 cmd = """
1020 update blobs.doc_med set
1021 fk_type = %(new_type)s
1022 where
1023 fk_type = %(old_type)s
1024 """
1025 args = {'new_type': target_type['pk_doc_type'], 'old_type': original_type['pk_doc_type']}
1026
1027 gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
1028
1029 return True
1030
1031
1032 class cDocumentType(gmBusinessDBObject.cBusinessDBObject):
1033 """Represents a document type."""
1034 _cmd_fetch_payload = """select * from blobs.v_doc_type where pk_doc_type=%s"""
1035 _cmds_store_payload = [
1036 """update blobs.doc_type set
1037 name = %(type)s
1038 where
1039 pk=%(pk_obj)s and
1040 xmin=%(xmin_doc_type)s""",
1041 """select xmin_doc_type from blobs.v_doc_type where pk_doc_type = %(pk_obj)s"""
1042 ]
1043 _updatable_fields = ['type']
1044
1045 def set_translation(self, translation=None):
1046
1047 if translation.strip() == '':
1048 return False
1049
1050 if translation.strip() == self._payload[self._idx['l10n_type']].strip():
1051 return True
1052
1053 rows, idx = gmPG2.run_rw_queries (
1054 queries = [
1055 {'cmd': 'select i18n.i18n(%s)', 'args': [self._payload[self._idx['type']]]},
1056 {'cmd': 'select i18n.upd_tx((select i18n.get_curr_lang()), %(orig)s, %(tx)s)',
1057 'args': {
1058 'orig': self._payload[self._idx['type']],
1059 'tx': translation
1060 }
1061 }
1062 ],
1063 return_data = True
1064 )
1065 if not rows[0][0]:
1066 _log.error('cannot set translation to [%s]' % translation)
1067 return False
1068
1069 return self.refetch_payload()
1070
1071
1072 def get_document_types():
1073 rows, idx = gmPG2.run_ro_queries (
1074 queries = [{'cmd': "SELECT * FROM blobs.v_doc_type"}],
1075 get_col_idx = True
1076 )
1077 doc_types = []
1078 for row in rows:
1079 row_def = {'pk_field': 'pk_doc_type', 'idx': idx, 'data': row}
1080 doc_types.append(cDocumentType(row = row_def))
1081 return doc_types
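# Usage sketch: enumerate the known document types, or create one by name
# (the type name used here is only an example):
#
#   for doc_type in get_document_types():
#       print(doc_type['type'], doc_type['l10n_type'])
#   dt = create_document_type(document_type = 'discharge summary')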
1082
1083
1084 def get_document_type_pk(document_type=None):
1085 args = {'typ': document_type.strip()}
1086
1087 cmd = 'SELECT pk FROM blobs.doc_type WHERE name = %(typ)s'
1088 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
1089 if len(rows) == 0:
1090 cmd = 'SELECT pk FROM blobs.doc_type WHERE _(name) = %(typ)s'
1091 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
1092
1093 if len(rows) == 0:
1094 return None
1095
1096 return rows[0]['pk']
1097
1098
1100 args = {'types': tuple(document_types)}
1101 cmd = 'SELECT pk_doc_type, coalesce(l10n_type, type) as desc FROM blobs.v_doc_type WHERE l10n_type IN %(types)s OR type IN %(types)s'
1102 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
1103 return rows
1104
1105
1106 def create_document_type(document_type=None):
1107
1108 cmd = 'SELECT pk FROM blobs.doc_type WHERE name = %s'
1109 rows, idx = gmPG2.run_ro_queries (
1110 queries = [{'cmd': cmd, 'args': [document_type]}]
1111 )
1112 if len(rows) == 0:
1113 _log.debug('creating document type [%s]', document_type)
1114 cmd1 = "INSERT INTO blobs.doc_type (name) VALUES (%s) RETURNING pk"
1115 rows, idx = gmPG2.run_rw_queries (
1116 queries = [{'cmd': cmd1, 'args': [document_type]}],
1117 return_data = True
1118 )
1119 return cDocumentType(aPK_obj = rows[0][0])
1120
1121
1122 def delete_document_type(document_type=None):
1123 if document_type['is_in_use']:
1124 return False
1125 gmPG2.run_rw_queries (
1126 queries = [{
1127 'cmd': 'delete from blobs.doc_type where pk=%s',
1128 'args': [document_type['pk_doc_type']]
1129 }]
1130 )
1131 return True
1132
1133
1134 def get_ext_ref():
1135 """This needs *considerably* more smarts."""
1136 dirname = gmTools.get_unique_filename (
1137 prefix = '',
1138 suffix = time.strftime(".%Y%m%d-%H%M%S", time.localtime())
1139 )
1140
1141 path, doc_ID = os.path.split(dirname)
1142 return doc_ID
1143
1144
1145
1146
1147 if __name__ == '__main__':
1148
1149 if len(sys.argv) < 2:
1150 sys.exit()
1151
1152 if sys.argv[1] != 'test':
1153 sys.exit()
1154
1155
1156 def test_doc_types():
1157
1158 print("----------------------")
1159 print("listing document types")
1160 print("----------------------")
1161
1162 for dt in get_document_types():
1163 print(dt)
1164
1165 print("------------------------------")
1166 print("testing document type handling")
1167 print("------------------------------")
1168
1169 dt = create_document_type(document_type = 'dummy doc type for unit test 1')
1170 print("created:", dt)
1171
1172 dt['type'] = 'dummy doc type for unit test 2'
1173 dt.save_payload()
1174 print("changed base name:", dt)
1175
1176 dt.set_translation(translation = 'Dummy-Dokumenten-Typ fuer Unit-Test')
1177 print("translated:", dt)
1178
1179 print("deleted:", delete_document_type(document_type = dt))
1180
1181 return
1182
1183 def test_adding_doc_part():
1184
1185 print("-----------------------")
1186 print("testing document import")
1187 print("-----------------------")
1188
1189 docs = search_for_documents(patient_id=12)
1190 doc = docs[0]
1191 print("adding to doc:", doc)
1192
1193 fname = sys.argv[1]
1194 print("adding from file:", fname)
1195 part = doc.add_part(file=fname)
1196 print("new part:", part)
1197
1198 return
1199
1200 def test_get_documents():
1201
1202 doc_folder = cDocumentFolder(aPKey=12)
1203
1204
1205
1206
1207 docs = doc_folder.get_documents()
1208 for doc in docs:
1209
1210
1211
1212 print('--------------------------')
1213 print(doc.format(single_line = True))
1214 print(doc.format())
1215
1216
1232
1233
1234 from Gnumed.pycommon import gmI18N
1235 gmI18N.activate_locale()
1236 gmI18N.install_domain()
1237
1238
1239
1240 test_get_documents()
1241
1242
1243
1244