/**
 * Importer service
 *
 * The importer service handles the import of tabular data from comma delimited (CSV)
 * and Excel files.
 *
 * @package importer
 * @author t.w.abma@umcutrecht.nl
 * @since 20100126
 *
 * Revision information:
 * $Rev: 735 $
 * $Author: tabma $
 * $Date: 2010-07-27 14:26:35 +0000 (di, 27 jul 2010) $
 */

package dbnp.importer

import org.apache.poi.hssf.usermodel.*
import org.apache.poi.poifs.filesystem.POIFSFileSystem
import org.apache.poi.ss.usermodel.DataFormatter

import dbnp.studycapturing.TemplateFieldType
import dbnp.studycapturing.Template
import dbnp.studycapturing.SamplingEvent
import dbnp.studycapturing.Study
import dbnp.studycapturing.Subject
import dbnp.studycapturing.Event
import dbnp.studycapturing.Sample

import dbnp.data.Term

class ImporterService {

    boolean transactional = true

    /**
     * Reads an Excel (HSSF) workbook from an input stream.
     *
     * @param is input stream representing the (workbook) resource
     * @return high level representation of the workbook
     */
    HSSFWorkbook getWorkbook(InputStream is) {
        POIFSFileSystem fs = new POIFSFileSystem(is)
        HSSFWorkbook wb = new HSSFWorkbook(fs)
        return wb
    }
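
    /*
     * Usage sketch (illustrative only; 'importerService' would normally be injected by Grails,
     * and the file path is just an example, not part of this class):
     *
     *   def wb = importerService.getWorkbook(new FileInputStream("/tmp/study.xls"))
     *   println "sheets: " + wb.getNumberOfSheets()
     */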

    /**
     * Builds a header description for a sheet by inspecting the first row of the data matrix
     * and guessing a template field type for every column.
     *
     * @param wb high level representation of the workbook
     * @param sheetindex sheet to use within the workbook
     * @param headerrow row index of the header row (note: the current implementation reads header names from the sheet's first row)
     * @param datamatrix_start row index of the first data row, used to guess the column types
     * @param theEntity entity class to assign to every column (optional)
     * @return header representation as a map of column index to MappingColumn
     */
    def getHeader(HSSFWorkbook wb, int sheetindex, int headerrow, int datamatrix_start, theEntity = null) {

        def sheet = wb.getSheetAt(sheetindex)
        def sheetrow = sheet.getRow(datamatrix_start)
        def header = [:]
        def df = new DataFormatter()
        def property = ""

        (0..sheetrow.getLastCellNum() - 1).each { columnindex ->

            def datamatrix_celltype = sheet.getRow(datamatrix_start).getCell(columnindex, org.apache.poi.ss.usermodel.Row.CREATE_NULL_AS_BLANK).getCellType()
            def datamatrix_celldata = df.formatCellValue(sheet.getRow(datamatrix_start).getCell(columnindex))
            def datamatrix_cell = sheet.getRow(datamatrix_start).getCell(columnindex)
            def headercell = sheet.getRow(sheet.getFirstRowNum()).getCell(columnindex)

            // Determine the template field type per cell type. The branches are largely
            // identical at the moment, but leave room for cell type specific handling
            // (e.g. specific formatting) later on.
            switch (datamatrix_celltype) {
                case HSSFCell.CELL_TYPE_STRING:
                    def doubleBoolean = true
                    def fieldtype = TemplateFieldType.STRING

                    // is this string perhaps a double?
                    try {
                        formatValue(datamatrix_celldata, TemplateFieldType.DOUBLE)
                    } catch (NumberFormatException nfe) { doubleBoolean = false }
                    finally {
                        if (doubleBoolean) fieldtype = TemplateFieldType.DOUBLE
                    }

                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: fieldtype,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                case HSSFCell.CELL_TYPE_NUMERIC:
                    def fieldtype = TemplateFieldType.INTEGER
                    def doubleBoolean = true
                    def integerBoolean = true

                    // is this cell really an integer?
                    try {
                        Integer.valueOf(datamatrix_celldata)
                    } catch (NumberFormatException nfe) { integerBoolean = false }
                    finally {
                        if (integerBoolean) fieldtype = TemplateFieldType.INTEGER
                    }

                    // it's not an integer, perhaps a double?
                    if (!integerBoolean)
                        try {
                            formatValue(datamatrix_celldata, TemplateFieldType.DOUBLE)
                        } catch (NumberFormatException nfe) { doubleBoolean = false }
                        finally {
                            if (doubleBoolean) fieldtype = TemplateFieldType.DOUBLE
                        }

                    // a date formatted numeric cell maps to a date field
                    if (HSSFDateUtil.isCellDateFormatted(datamatrix_cell)) fieldtype = TemplateFieldType.DATE

                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: fieldtype,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                case HSSFCell.CELL_TYPE_BLANK:
                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: TemplateFieldType.STRING,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                default:
                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: TemplateFieldType.STRING,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
            } // end of switch
        } // end of cell loop
        return header
    }
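
    /*
     * Illustrative sketch of how the header map is typically used (the 'importerService' and
     * 'wb' variables, the sheet/row indices and the Subject entity are example assumptions):
     *
     *   def header = importerService.getHeader(wb, 0, 0, 1, Subject.class)
     *   header.each { columnindex, mappingcolumn ->
     *       println "${columnindex}: ${mappingcolumn.name} -> ${mappingcolumn.templatefieldtype}"
     *   }
     */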

    /**
     * Returns a matrix of the rows and columns used in the preview.
     *
     * @param wb workbook object
     * @param header header map as returned by getHeader, used to determine the number of columns
     * @param sheetindex sheet index used
     * @param datamatrix_start row index of the first data row to include
     * @param count index of the last row to include in the preview
     * @return two dimensional array (matrix) of HSSFCell objects
     */
    HSSFCell[][] getDatamatrix(HSSFWorkbook wb, header, int sheetindex, int datamatrix_start, int count) {
        def sheet = wb.getSheetAt(sheetindex)
        def rows = []

        // walk through all rows of the preview
        if (count <= sheet.getLastRowNum()) {
            ((datamatrix_start + sheet.getFirstRowNum())..count).each { rowindex ->
                def row = []

                // walk through every cell of the current row
                (0..header.size() - 1).each { columnindex ->
                    def c = sheet.getRow(rowindex).getCell(columnindex, org.apache.poi.ss.usermodel.Row.CREATE_NULL_AS_BLANK)
                    row.add(c)
                }
                rows.add(row)
            }
        }

        return rows
    }
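
    /*
     * Preview sketch (illustrative; the workbook variable, sheet index and row range are
     * example assumptions):
     *
     *   def header = importerService.getHeader(wb, 0, 0, 1)
     *   def preview = importerService.getDatamatrix(wb, header, 0, 1, 5)
     *   preview.each { row ->
     *       println row.collect { new DataFormatter().formatCellValue(it) }.join("\t")
     *   }
     */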

    /**
     * This method will move a file to a new location.
     *
     * @param file File object to move
     * @param folderpath folder to move the file to
     * @param filename (new) filename to give
     * @return the new path and filename if the file was moved successfully, otherwise an empty string
     */
    def moveFile(File file, String folderpath, String filename) {
        try {
            def rnd = "" //System.currentTimeMillis()
            file.transferTo(new File(folderpath, rnd + filename))
            return folderpath + filename
        } catch (Exception exception) {
            log.error "File move error, ${exception}"
            return ""
        }
    }

    /**
     * @return a pseudo-random numeric value based on the current time and free memory
     */
    def random = {
        return System.currentTimeMillis() + Runtime.runtime.freeMemory()
    }

    /**
     * Reads data from a workbook and imports it into a two dimensional array of records.
     *
     * @param template_id template identifier to use fields from
     * @param wb POI HSSF ("horrible spreadsheet format") workbook object
     * @param sheetindex sheet to use when using multiple sheets
     * @param rowindex first row to start reading the actual data from (NOT the header)
     * @param mcmap linked hashmap (preserved order) of MappingColumns
     * @return two dimensional array containing records (with entities)
     *
     * @see dbnp.importer.MappingColumn
     */
    def importdata(template_id, HSSFWorkbook wb, int sheetindex, int rowindex, mcmap) {
        def sheet = wb.getSheetAt(sheetindex)
        def table = []

        // walk through all rows and fill the table with records
        (rowindex..sheet.getLastRowNum()).each { i ->
            table.add(createRecord(template_id, sheet.getRow(i), mcmap))
        }
        return table
    }
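
    /*
     * End-to-end sketch of the import pipeline (illustrative only; identifiers such as
     * 'importerService', 'excelFile', 'templateId', 'mcmap' and 'study' are assumptions
     * of the example, not defined by this class):
     *
     *   def wb = importerService.getWorkbook(excelFile.inputStream)
     *   def datamatrix = importerService.importdata(templateId, wb, 0, 1, mcmap)
     *   def count = importerService.saveDatamatrix(study, datamatrix)
     *   println "${count} entities validated and persisted"
     */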

    /**
     * Stores a matrix containing the entities in a record-like structure. Every row in the table
     * contains one or more entity objects (which contain fields with values), so a row represents
     * a record with fields from one or more different entities.
     *
     * @param study Study entity to attach the imported entities to
     * @param datamatrix two dimensional array containing entities with values read from the Excel file
     * @return number of entities that validated successfully
     */
    def saveDatamatrix(Study study, datamatrix) {
        def validatedSuccessfully = 0
        study.refresh()

        // go through the data matrix, read every record, validate the entity and try to persist it
        datamatrix.each { record ->
            record.each { entity ->
                if (entity.validate()) {
                    switch (entity.getClass()) {
                        case Study:
                            print "Persisting Study `" + entity + "`: "
                            persistEntity(entity)
                            break
                        case Subject:
                            print "Persisting Subject `" + entity + "`: "
                            persistEntity(entity)
                            study.addToSubjects(entity)
                            break
                        case Event:
                            print "Persisting Event `" + entity + "`: "
                            persistEntity(entity)
                            study.addToEvents(entity)
                            break
                        case Sample:
                            print "Persisting Sample `" + entity + "`: "
                            persistEntity(entity)
                            study.addToSamples(entity)
                            break
                        case SamplingEvent:
                            print "Persisting SamplingEvent `" + entity + "`: "
                            persistEntity(entity)
                            study.addToSamplingEvents(entity)
                            break
                        default:
                            println "Skipping persisting of `" + entity.getClass() + "`"
                            break
                    } // end switch
                    validatedSuccessfully++
                } // end if
            } // end record
        } // end datamatrix
        return validatedSuccessfully
    }

    /**
     * Persists an entity into the database. When saving fails, the validation errors
     * are printed.
     *
     * @param entity entity object like Study, Subject, Protocol et cetera
     */
    def persistEntity(entity) {
        if (!entity.save()) { //.merge?
            entity.errors.allErrors.each {
                println it
            }
        }
    }

    /**
     * Creates a record (list) containing entities with their fields set to the cell values.
     *
     * @param template_id template identifier
     * @param excelrow POI based Excel row containing the cells
     * @param mcmap map containing MappingColumn objects, keyed by column index
     * @return list of entity instances (at most one per entity type encountered in the row)
     */
    def createRecord(template_id, HSSFRow excelrow, mcmap) {
        def df = new DataFormatter()
        def template = Template.get(template_id)
        def record = []

        // Initialize all possible entities with the chosen template
        def study = new Study(template: template)
        def subject = new Subject(template: template)
        def samplingEvent = new SamplingEvent(template: template)
        def event = new Event(template: template)
        def sample = new Sample(template: template)

        // Go through the Excel row cell by cell
        for (HSSFCell cell : excelrow) {
            // get the MappingColumn information of the current cell
            def mc = mcmap[cell.getColumnIndex()]
            def value

            // Check if the column must be imported
            if (!mc.dontimport) {
                try {
                    value = formatValue(df.formatCellValue(cell), mc.templatefieldtype)
                } catch (NumberFormatException nfe) {
                    value = ""
                }

                // Which entity does the current cell (field) belong to?
                // Add the entity to the record once, then set the field value.
                switch (mc.entity) {
                    case Study:
                        if (!record.any { it.getClass() == mc.entity }) record.add(study)
                        study.setFieldValue(mc.property, value)
                        break
                    case Subject:
                        if (!record.any { it.getClass() == mc.entity }) record.add(subject)
                        subject.setFieldValue(mc.property, value)
                        break
                    case SamplingEvent:
                        if (!record.any { it.getClass() == mc.entity }) record.add(samplingEvent)
                        samplingEvent.setFieldValue(mc.property, value)
                        break
                    case Event:
                        if (!record.any { it.getClass() == mc.entity }) record.add(event)
                        event.setFieldValue(mc.property, value)
                        break
                    case Sample:
                        if (!record.any { it.getClass() == mc.entity }) record.add(sample)
                        sample.setFieldValue(mc.property, value)
                        break
                    case Object: // don't import
                        break
                } // end switch
            } // end if
        } // end for

        return record
    }
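
    /*
     * Sketch of consuming a record (illustrative; 'record' would come from createRecord,
     * typically via importdata). A record holds at most one instance per mapped entity type,
     * each with its template fields already set through setFieldValue():
     *
     *   record.each { entity ->
     *       println "${entity.getClass().simpleName}: ${entity}"
     *   }
     */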

    /**
     * Parses a string value according to a specific template field type.
     *
     * @param value string containing the value to parse
     * @param type TemplateFieldType to parse the value as
     * @return object corresponding to the TemplateFieldType
     * @throws NumberFormatException when a numeric type cannot be parsed
     */
    def formatValue(String value, TemplateFieldType type) throws NumberFormatException {
        switch (type) {
            case TemplateFieldType.STRING       : return value.trim()
            case TemplateFieldType.TEXT         : return value.trim()
            case TemplateFieldType.INTEGER      : return (int) Double.valueOf(value)
            case TemplateFieldType.FLOAT        : return Float.valueOf(value.replace(",", "."))
            case TemplateFieldType.DOUBLE       : return Double.valueOf(value.replace(",", "."))
            case TemplateFieldType.STRINGLIST   : return value.trim()
            case TemplateFieldType.ONTOLOGYTERM : return value.trim()
            case TemplateFieldType.DATE         : return value
            default                             : return value
        }
    }
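
    /*
     * Worked examples (illustrative; the return values follow directly from the cases above):
     *
     *   formatValue("  abc ", TemplateFieldType.STRING)   // -> "abc"
     *   formatValue("1,5",    TemplateFieldType.DOUBLE)   // -> 1.5d  (comma treated as decimal separator)
     *   formatValue("2.7",    TemplateFieldType.INTEGER)  // -> 2     (parsed as double, then truncated to int)
     *   formatValue("oops",   TemplateFieldType.INTEGER)  // throws NumberFormatException
     */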

}