/**
 * Importer service
 *
 * The importer service handles the import of tabular data from comma-delimited and
 * Excel-based files.
 *
 * @package importer
 * @author t.w.abma@umcutrecht.nl
 * @since 20100126
 *
 * Revision information:
 * $Rev: 1416 $
 * $Author: t.w.abma@umcutrecht.nl $
 * $Date: 2011-01-19 15:01:08 +0000 (wo, 19 jan 2011) $
 */

package dbnp.importer

import org.apache.poi.ss.usermodel.*
import org.apache.poi.xssf.usermodel.XSSFCell

import dbnp.studycapturing.TemplateFieldType
import dbnp.studycapturing.Template
import dbnp.studycapturing.SamplingEvent
import dbnp.studycapturing.Study
import dbnp.studycapturing.Subject
import dbnp.studycapturing.Event
import dbnp.studycapturing.Sample

class ImporterService {
    def AuthenticationService
    def sessionFactory // used in persistEntity() to reset the Hibernate flush mode after a failed save

    boolean transactional = true

    /**
     * Creates a POI workbook from an input stream (both .xls and .xlsx are supported).
     *
     * @param is input stream representing the (workbook) resource
     * @return high-level representation of the workbook
     */
    Workbook getWorkbook(InputStream is) {
        WorkbookFactory.create(is)
    }

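    /*
     * Usage sketch (illustrative only, not part of the service): assuming this service is
     * injected into a controller as `importerService` and the user uploaded a file via a
     * multipart form field named `importfile`, the workbook could be read as:
     *
     *   def wb = importerService.getWorkbook(request.getFile('importfile').inputStream)
     */
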
    /**
     * Builds a header representation of the sheet: for every column the cell type of the
     * first data row is inspected to guess a default template field type, and a
     * MappingColumn is created holding the header name and that guessed type.
     *
     * @param wb high-level representation of the workbook
     * @param sheetindex sheet to use within the workbook
     * @param headerrow row containing the header cells (1-based, relative to the first row of the sheet)
     * @param datamatrix_start row index of the first data row
     * @param theEntity entity class to pre-select for every column (optional)
     * @return header representation as a map of MappingColumn objects, keyed by column index
     */
    def getHeader(Workbook wb, int sheetindex, int headerrow, int datamatrix_start, theEntity = null) {
        def sheet = wb.getSheetAt(sheetindex)
        def sheetrow = sheet.getRow(datamatrix_start)
        def header = [:]
        def df = new DataFormatter()
        def property = ""

        (0..sheetrow.getLastCellNum() - 1).each { columnindex ->
            def datamatrix_celltype = sheet.getRow(datamatrix_start).getCell(columnindex, Row.CREATE_NULL_AS_BLANK).getCellType()
            def datamatrix_celldata = df.formatCellValue(sheet.getRow(datamatrix_start).getCell(columnindex))
            def datamatrix_cell = sheet.getRow(datamatrix_start).getCell(columnindex)
            def headercell = sheet.getRow(headerrow - 1 + sheet.getFirstRowNum()).getCell(columnindex)

            // Determine a default template field type per cell type. The branches are largely
            // identical now, but are kept separate so cell-type specific handling (e.g. custom
            // formatting) can be added later.
            switch (datamatrix_celltype) {
                case Cell.CELL_TYPE_STRING:
                    // the cell contains a string, but perhaps it can be parsed as a double?
                    def doubleBoolean = true
                    def fieldtype = TemplateFieldType.STRING

                    try {
                        formatValue(datamatrix_celldata, TemplateFieldType.DOUBLE)
                    } catch (NumberFormatException nfe) {
                        doubleBoolean = false
                    } finally {
                        if (doubleBoolean) fieldtype = TemplateFieldType.DOUBLE
                    }

                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: fieldtype,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                case Cell.CELL_TYPE_NUMERIC:
                    def fieldtype = TemplateFieldType.LONG
                    def doubleBoolean = true
                    def longBoolean = true

                    // is this cell really an integer (long)?
                    try {
                        Long.valueOf(datamatrix_celldata)
                    } catch (NumberFormatException nfe) {
                        longBoolean = false
                    } finally {
                        if (longBoolean) fieldtype = TemplateFieldType.LONG
                    }

                    // it's not a long, perhaps a double?
                    if (!longBoolean) {
                        try {
                            formatValue(datamatrix_celldata, TemplateFieldType.DOUBLE)
                        } catch (NumberFormatException nfe) {
                            doubleBoolean = false
                        } finally {
                            if (doubleBoolean) fieldtype = TemplateFieldType.DOUBLE
                        }
                    }

                    // date-formatted numeric cells take precedence
                    if (DateUtil.isCellDateFormatted(datamatrix_cell)) fieldtype = TemplateFieldType.DATE

                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: fieldtype,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                case Cell.CELL_TYPE_BLANK:
                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: TemplateFieldType.STRING,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
                default:
                    header[columnindex] = new dbnp.importer.MappingColumn(name: df.formatCellValue(headercell),
                        templatefieldtype: TemplateFieldType.STRING,
                        index: columnindex,
                        entity: theEntity,
                        property: property)
                    break
            } // end of switch
        } // end of cell loop

        return header
    }

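    /*
     * Usage sketch (illustrative only): assuming sheet 0, a header on the first row, data
     * starting on the second row and Subject as the entity to map to:
     *
     *   def header = importerService.getHeader(wb, 0, 1, 1, Subject)
     *   header.each { columnindex, mappingcolumn ->
     *       println "${mappingcolumn.name} -> ${mappingcolumn.templatefieldtype}"
     *   }
     */
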
    /**
     * Returns a matrix of the rows and columns used in the preview.
     *
     * @param wb workbook object
     * @param header header map as returned by getHeader(), used to determine the number of columns
     * @param sheetindex sheet index used
     * @param datamatrix_start row index of the first data row
     * @param count number of rows to return (capped at the last row of the sheet)
     * @return two-dimensional array (matrix) of Cell objects
     */
    Object[][] getDatamatrix(Workbook wb, header, int sheetindex, int datamatrix_start, int count) {
        def sheet = wb.getSheetAt(sheetindex)
        def rows = []

        count = (count < sheet.getLastRowNum()) ? count : sheet.getLastRowNum()

        // walk through all requested rows
        ((datamatrix_start + sheet.getFirstRowNum())..count).each { rowindex ->
            def row = []

            (0..header.size() - 1).each { columnindex ->
                def c = sheet.getRow(rowindex).getCell(columnindex, Row.CREATE_NULL_AS_BLANK)
                row.add(c)
            }

            rows.add(row)
        }

        return rows
    }

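    /*
     * Usage sketch (illustrative only): a preview of the first ten data rows, using the
     * header obtained above:
     *
     *   def preview = importerService.getDatamatrix(wb, header, 0, 1, 10)
     *   preview.each { row -> println row.collect { cell -> cell.toString() }.join('\t') }
     */
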
    /**
     * Moves an (uploaded) file to a new location.
     *
     * @param file file to move (typically a Spring MultipartFile, which provides transferTo())
     * @param folderpath folder to move the file to
     * @param filename (new) filename to give the file
     * @return the new path and filename if the file has been moved successfully, otherwise an empty string
     */
    def moveFile(file, String folderpath, String filename) {
        try {
            def rnd = "" // optional unique prefix, e.g. System.currentTimeMillis()
            file.transferTo(new File(folderpath, rnd + filename))
            return new File(folderpath, rnd + filename).path
        } catch (Exception exception) {
            log.error "File move error, ${exception}"
            return ""
        }
    }

    /**
     * @return pseudo-random numeric value, derived from the current time and the amount of free memory
     */
    def random = {
        return System.currentTimeMillis() + Runtime.runtime.freeMemory()
    }

    /**
     * Reads the data rows from a workbook and converts them into a two-dimensional
     * array of records.
     *
     * @param template_id template identifier to use fields from
     * @param wb POI workbook object (HSSF/XSSF)
     * @param sheetindex sheet to use when using multiple sheets
     * @param rowindex first row to start reading the actual data from (NOT the header)
     * @param mcmap linked hashmap (preserved order) of MappingColumns
     * @return a list containing the datamatrix (records with entities) and the list of failed cells
     *
     * @see dbnp.importer.MappingColumn
     */
    def importData(template_id, Workbook wb, int sheetindex, int rowindex, mcmap) {
        def sheet = wb.getSheetAt(sheetindex)
        def template = Template.get(template_id)
        def table = []
        def failedcells = [] // list of records holding the cells which failed to be mapped

        // walk through all rows and fill the table with records
        (rowindex..sheet.getLastRowNum()).each { i ->
            // create an entity record based on a row read from Excel and store the cells which failed to be mapped
            def (record, failed) = createRecord(template, sheet.getRow(i), mcmap)

            // add the record with entity and its values to the table
            table.add(record)

            // if failed cells have been found, add them to the failed cells list
            if (failed?.importcells?.size() > 0) failedcells.add(failed)
        }

        return [table, failedcells]
    }

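    /*
     * Usage sketch (illustrative only): a minimal import flow combining the steps above,
     * assuming a template id of 1, sheet 0 and data starting on the second row, with
     * `mcmap` being the (possibly user-adjusted) column mapping from getHeader(); the
     * file path is hypothetical:
     *
     *   def wb = importerService.getWorkbook(new FileInputStream('/tmp/samples.xls'))
     *   def mcmap = importerService.getHeader(wb, 0, 1, 1, Sample)
     *   def (datamatrix, failedcells) = importerService.importData(1, wb, 0, 1, mcmap)
     */
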
    /**
     * Puts corrected cells back into the datamatrix. Failed cells are cell values which
     * could not be stored in an entity (e.g. 'Humu Supiuns' in an ontology field).
     * Empty corrections should not be stored.
     *
     * @param datamatrix two-dimensional array containing entities and possibly also failed cells
     * @param failedcells failed cells (ImportRecords), keyed by the hashcode of the table record they belong to
     * @param correctedcells map of corrected cells in [cellhashcode, value] format
     */
    def saveCorrectedCells(datamatrix, failedcells, correctedcells) {

        // loop through all failed cells (stored per table record as [record hashcode, ImportRecord])
        failedcells.each { record ->
            record.value.importcells.each { cell ->

                // get the corrected value
                def correctedvalue = correctedcells.find { it.key.toInteger() == cell.getIdentifier() }.value

                // find the record in the table which the mappingcolumn belongs to
                def tablerecord = datamatrix.find { it.hashCode() == record.key }

                // loop through all entities in the record and correct them if necessary
                tablerecord.each { rec ->
                    rec.each { entity ->
                        try {
                            // update the entity field
                            entity.setFieldValue(cell.mappingcolumn.property, correctedvalue)
                        }
                        catch (Exception e) {
                            log.error ".import wizard could not set corrected value `" + correctedvalue + "` for property `" + cell.mappingcolumn.property + "`"
                        }
                    }
                } // end of table record
            } // end of cell record
        } // end of failed cells
    }

    /**
     * Stores a matrix containing the entities in a record-like structure. Every row in the table
     * contains one or more entity objects (which contain fields with values). So a row actually
     * represents a record with fields from one or more different entities.
     *
     * @param study entity Study
     * @param datamatrix two-dimensional array containing entities with values read from Excel file
     */
    def saveDatamatrix(Study study, datamatrix) {
        def validatedSuccessfully = 0
        def entitystored = null

        // study passed? sync the data
        if (study != null) study.refresh()

        // go through the data matrix, read every record, validate the entity and try to persist it
        datamatrix.each { record ->
            record.each { entity ->
                switch (entity.getClass()) {
                    case Study:
                        log.info "Persisting Study `" + entity + "`: "
                        entity.owner = AuthenticationService.getLoggedInUser()
                        persistEntity(entity)
                        break
                    case Subject:
                        log.info "Persisting Subject `" + entity + "`: "

                        // is the current entity not already in the database?
                        //entitystored = isEntityStored(entity)

                        // this entity is new, so add it to the study
                        //if (entitystored == null)
                        study.addToSubjects(entity)

                        /*else { // existing entity, so update it
                            updateEntity(entitystored, entity)
                            updatedentities.add(entity)
                        }*/

                        break
                    case Event:
                        log.info "Persisting Event `" + entity + "`: "
                        study.addToEvents(entity)
                        break
                    case Sample:
                        log.info "Persisting Sample `" + entity + "`: "
                        // is this sample validatable (e.g. is the sample name unique)?
                        study.addToSamples(entity)
                        break
                    case SamplingEvent:
                        log.info "Persisting SamplingEvent `" + entity + "`: "
                        study.addToSamplingEvents(entity)
                        break
                    default:
                        log.info "Skipping persisting of `" + entity.getClass() + "`"
                        break
                } // end switch
            } // end record
        } // end datamatrix

        if (study != null) study.save(failOnError: true)
    }

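    /*
     * Usage sketch (illustrative only): persisting an imported datamatrix into an existing
     * study after corrections for failed cells have been applied; `studyId`, `datamatrix`,
     * `failedcells` and `correctedcells` are assumed to come from the import wizard flow:
     *
     *   importerService.saveCorrectedCells(datamatrix, failedcells, correctedcells)
     *   importerService.saveDatamatrix(Study.get(studyId), datamatrix)
     */
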
    /**
     * Checks whether an entity already exists. A unique field in the entity is
     * used to check whether the instantiated entity (read from Excel) is new.
     * If the entity is found in the database it will be returned as-is.
     *
     * @param entity entity object like a Study, Subject, Sample et cetera
     * @return the stored entity if found, otherwise null
     */
    def isEntityStored(entity) {
        switch (entity.getClass()) {
            case Study:
                return Study.findByCode(entity.code)
            case Subject:
                return Subject.findByParentAndName(entity.parent, entity.name)
            case Event:
            case Sample:
            case SamplingEvent:
                // lookup not (yet) implemented for these entities
                break
            default: // unknown entity
                return null
        }
    }

    /**
     * Finds the entity in the database and updates its fields. The entity passed in is an
     * instance read from Excel. This method looks in the database for the entity with the
     * same identifier and, when found, updates that record.
     *
     * @param entitystored existing record in the database to update
     * @param entity entity read from Excel
     */
    def updateEntity(entitystored, entity) {
        switch (entity.getClass()) {
            case Study:
                break
            case Subject:
                entitystored.properties = entity.properties
                entitystored.save()
                break
            case Event:
            case Sample:
            case SamplingEvent:
                break
            default: // unknown entity
                return null
        }
    }

    /**
     * Persists an entity into the database.
     *
     * @param entity entity object like Study, Subject, Protocol et cetera
     * @return true if the entity was saved successfully, false otherwise
     */
    boolean persistEntity(entity) {
        log.info ".import wizard persisting ${entity}"

        try {
            entity.save(flush: true)
            return true
        } catch (Exception e) {
            // switch to manual flushing so the failed entity is not flushed again later on
            def session = sessionFactory.currentSession
            session.setFlushMode(org.hibernate.FlushMode.MANUAL)
            log.error ".import wizard, failed to save entity:\n" + org.apache.commons.lang.exception.ExceptionUtils.getRootCauseMessage(e)
        }

        return false
    }

    /**
     * Creates a record (list) containing entities with values for one Excel row.
     *
     * @param template template to use for the entities
     * @param excelrow POI based Excel row containing the cells
     * @param mcmap map containing MappingColumn objects
     * @return list containing the record (list of entities) and an ImportRecord with the failed cells
     */
    def createRecord(template, Row excelrow, mcmap) {
        def df = new DataFormatter()
        def record = [] // list of entities and the read values
        def failed = new ImportRecord() // holds the failed mappingcolumns per entity identifier

        // initialize all possible entities with the chosen template
        def study = new Study(template: template)
        def subject = new Subject(template: template)
        def samplingEvent = new SamplingEvent(template: template)
        def event = new Event(template: template)
        def sample = new Sample(template: template)

        // go through the Excel row cell by cell
        for (Cell cell : excelrow) {
            // get the MappingColumn information of the current cell
            def mc = mcmap[cell.getColumnIndex()]
            def value

            // check whether the column must be imported
            if (mc != null && !mc.dontimport) {
                try {
                    value = formatValue(df.formatCellValue(cell), mc.templatefieldtype)
                } catch (NumberFormatException nfe) {
                    value = ""
                }

                try {
                    // which entity does the current cell (field) belong to?
                    // if the entity is not yet part of the record, add it first
                    switch (mc.entity) {
                        case Study:
                            if (!record.any { it.getClass() == mc.entity }) record.add(study)
                            study.setFieldValue(mc.property, value)
                            break
                        case Subject:
                            if (!record.any { it.getClass() == mc.entity }) record.add(subject)
                            subject.setFieldValue(mc.property, value)
                            break
                        case SamplingEvent:
                            if (!record.any { it.getClass() == mc.entity }) record.add(samplingEvent)
                            samplingEvent.setFieldValue(mc.property, value)
                            break
                        case Event:
                            if (!record.any { it.getClass() == mc.entity }) record.add(event)
                            event.setFieldValue(mc.property, value)
                            break
                        case Sample:
                            if (!record.any { it.getClass() == mc.entity }) record.add(sample)
                            sample.setFieldValue(mc.property, value)
                            break
                        case Object: // don't import
                            break
                    } // end switch
                } catch (Exception iae) {
                    log.error ".import wizard error could not set property `" + mc.property + "` to value `" + value + "`"

                    // store the mapping column and value which failed
                    def identifier

                    switch (mc.entity) {
                        case Study:
                            identifier = study.getIdentifier()
                            break
                        case Subject:
                            identifier = subject.getIdentifier()
                            break
                        case SamplingEvent:
                            identifier = samplingEvent.getIdentifier()
                            break
                        case Event:
                            identifier = event.getIdentifier()
                            break
                        case Sample:
                            identifier = sample.getIdentifier()
                            break
                        case Object: // don't import
                            break
                    }

                    def mcInstance = new MappingColumn()
                    mcInstance.properties = mc.properties
                    failed.addToImportcells(new ImportCell(mappingcolumn: mcInstance, value: value, entityidentifier: identifier))
                }
            } // end if
        } // end for

        // a failed cell means that entity.setFieldValue() threw an exception for that column
        return [record, failed]
    }

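    /*
     * Illustrative note (assumption about a typical mapping): if mcmap maps column 0 to
     * Subject.name and column 1 to Sample.name, a returned record is a list like
     * [subject, sample], where both entities carry the values from that Excel row, and
     * `failed` only contains ImportCells for values that setFieldValue() rejected.
     */
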
    /**
     * Parses a string value according to the given template field type.
     *
     * @param value string containing the value to parse
     * @param type TemplateFieldType to parse the value as
     * @return object corresponding to the TemplateFieldType
     */
    def formatValue(String value, TemplateFieldType type) throws NumberFormatException {
        switch (type) {
            case TemplateFieldType.STRING: return value.trim()
            case TemplateFieldType.TEXT: return value.trim()
            case TemplateFieldType.LONG: return (long) Double.valueOf(value)
            //case TemplateFieldType.FLOAT: return Float.valueOf(value.replace(",", "."))
            case TemplateFieldType.DOUBLE: return Double.valueOf(value.replace(",", "."))
            case TemplateFieldType.STRINGLIST: return value.trim()
            case TemplateFieldType.ONTOLOGYTERM: return value.trim()
            case TemplateFieldType.DATE: return value
            default: return value
        }
    }

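    /*
     * Usage sketch (illustrative only): comma decimal separators are accepted for doubles,
     * and longs are parsed via Double, so values such as "12.0" survive:
     *
     *   formatValue("1,5", TemplateFieldType.DOUBLE)    // -> 1.5 (double)
     *   formatValue("12.0", TemplateFieldType.LONG)     // -> 12 (long)
     *   formatValue(" text ", TemplateFieldType.STRING) // -> "text"
     */
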
    // methods for fuzzy string matching
    // <FUZZY MATCHING>

    /**
     * Cosine similarity between the n-gram frequency histograms of two sequences.
     */
    static def similarity(l_seq, r_seq, degree = 2) {
        def l_histo = countNgramFrequency(l_seq, degree)
        def r_histo = countNgramFrequency(r_seq, degree)

        dotProduct(l_histo, r_histo) /
            Math.sqrt(dotProduct(l_histo, l_histo) *
                dotProduct(r_histo, r_histo))
    }

    /**
     * Counts how often every n-gram of the given degree occurs in the sequence.
     */
    static def countNgramFrequency(sequence, degree) {
        def histo = [:]
        def items = sequence.size()

        for (int i = 0; i + degree <= items; i++) {
            def gram = sequence[i..<(i + degree)]
            histo[gram] = 1 + histo.get(gram, 0)
        }
        histo
    }

    /**
     * Dot product of two n-gram histograms.
     */
    static def dotProduct(l_histo, r_histo) {
        def sum = 0
        l_histo.each { key, value ->
            sum = sum + l_histo[key] * r_histo.get(key, 0)
        }
        sum
    }

    /**
     * Case-insensitive n-gram similarity between two strings (1.0 means identical n-gram profiles).
     */
    static def stringSimilarity(l_str, r_str, degree = 2) {
        similarity(l_str.toString().toLowerCase().toCharArray(),
            r_str.toString().toLowerCase().toCharArray(),
            degree)
    }

    /**
     * Returns the candidate that is most similar to the given pattern, or null if no
     * candidate scores above the threshold.
     */
    static def mostSimilar(pattern, candidates, threshold = 0) {
        def topScore = 0
        def bestFit = null

        candidates.each { candidate ->
            def score = stringSimilarity(pattern, candidate)
            if (score > topScore) {
                topScore = score
                bestFit = candidate
            }
        }

        if (topScore < threshold)
            bestFit = null

        bestFit
    }
    // </FUZZY MATCHING>

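    /*
     * Usage sketch (illustrative only): the fuzzy matching helpers can be used to pre-select
     * a likely template field for an Excel header, e.g.:
     *
     *   ImporterService.mostSimilar("Sample name", ["name", "material", "weight"], 0.3) // -> "name"
     */
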

}