wbia.algo.smk package

Submodules

wbia.algo.smk.inverted_index module

class wbia.algo.smk.inverted_index.InvertedAnnots[source]

Bases: wbia.algo.smk.inverted_index.InvertedAnnotsExtras

CommandLine:

python -m wbia.algo.smk.inverted_index InvertedAnnots --show

Ignore:
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> import wbia
>>> qreq_ = wbia.testdata_qreq_(defaultdb='Oxford', a='oxford',
>>>                              p='default:proot=smk,nAssign=1,num_words=64000')
>>> config = qreq_.qparams
>>> ibs = qreq_.ibs
>>> depc = qreq_.ibs.depc
>>> aids = qreq_.daids
>>> aids = qreq_.qaids
>>> input_tuple = (aids, [qreq_.daids])
>>> inva = ut.DynStruct()
>>> inva = InvertedAnnots(aids, qreq_)

Example

>>> # DISABLE_DOCTEST
>>> qreq_, inva = testdata_inva()
compute_gammas(alpha, thresh)[source]

Example

>>> # xdoctest: +REQUIRES(--slow)
>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> qreq_, inva = testdata_inva()
>>> inva.wx_to_weight = inva.compute_word_weights('uniform')
>>> alpha = 3.0
>>> thresh = 0.0
>>> gamma_list = inva.compute_gammas(alpha, thresh)
compute_inverted_list()[source]
compute_word_weights(method='idf')[source]

Compute a per-word weight like idf

Example

>>> # xdoctest: +REQUIRES(--slow)
>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> qreq_, inva = testdata_inva()
>>> wx_to_weight = inva.compute_word_weights()
>>> print('wx_to_weight = %r' % (wx_to_weight,))
classmethod from_depc(depc, aids, vocab_aids, config)[source]
get_annot(aid)[source]
rrr(verbose=True, reload_module=True)

Special class reloading function. This function is often injected as rrr of classes.

property wx_list
class wbia.algo.smk.inverted_index.InvertedAnnotsExtras[source]

Bases: object

get_nbytes()[source]
get_patches(wx, ibs, verbose=True)[source]

Loads the patches assigned to a particular word in this stack

>>> inva.wx_to_aids = inva.compute_inverted_list()
>>> verbose=True
get_size_info()[source]
get_word_patch(wx, ibs)[source]
print_size_info()[source]
render_inverted_vocab(ibs, use_data=False)[source]

Renders the average patch of each word. This is a visualization of the entire vocabulary.

CommandLine:

python -m wbia.algo.smk.inverted_index render_inverted_vocab --show
python -m wbia.algo.smk.inverted_index render_inverted_vocab --show --use-data
python -m wbia.algo.smk.inverted_index render_inverted_vocab --show --debug-depc

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> qreq_, inva = testdata_inva()
>>> ibs = qreq_.ibs
>>> all_words = inva.render_inverted_vocab(ibs)
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.qt4ensure()
>>> pt.imshow(all_words)
>>> ut.show_if_requested()
render_inverted_vocab_word(wx, ibs, fnum=None)[source]

Creates a visualization of a visual word. This includes the average patch, the SIFT-like representation of the centroid, and some of the patches that were assigned to it.

CommandLine:

python -m wbia.algo.smk.inverted_index render_inverted_vocab_word --show

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> import wbia.plottool as pt
>>> qreq_, inva = testdata_inva()
>>> ibs = qreq_.ibs
>>> wx_list = list(inva.wx_to_aids.keys())
>>> wx = wx_list[0]
>>> ut.qtensure()
>>> fnum = 2
>>> fnum = pt.ensure_fnum(fnum)
>>> # Interactive visualization of many words
>>> for wx in ut.InteractiveIter(wx_list):
>>>     word_img = inva.render_inverted_vocab_word(wx, ibs, fnum)
>>>     pt.imshow(word_img, fnum=fnum, title='Word %r/%r' % (wx, '?'))
>>>     pt.update()
class wbia.algo.smk.inverted_index.InvertedIndexConfig(**kwargs)[source]

Bases: wbia.dtool.base.Config

class wbia.algo.smk.inverted_index.SingleAnnot[source]

Bases: utool.util_dev.NiceRepr

Phis_flags(idxs)[source]

get subset of aggregated residual vectors

classmethod from_inva(inva, idx)[source]
fxs(c)[source]
maws(c)[source]
nbytes()[source]
nbytes_info()[source]
phis_flags_list(idxs)[source]

get subset of non-aggregated residual vectors

rrr(verbose=True, reload_module=True)

Special class reloading function. This function is often injected as rrr of classes.

to_dense(inva=None, out=None)[source]
property words
wbia.algo.smk.inverted_index.compute_residual_assignments(depc, fid_list, vocab_id_list, config)[source]
CommandLine:

python -m wbia.control.IBEISControl show_depc_annot_table_input --show --tablename=residuals

Ignore:

ibs.depc['vocab'].print_table()

Ignore:

data = ibs.depc.get('inverted_agg_assign', ([1, 2473], qreq_.daids), config=qreq_.config)
wxs1 = data[0][0]
wxs2 = data[1][0]

# Lev Example
import wbia
ibs = wbia.opendb('Oxford')
depc = ibs.depc
table = depc['inverted_agg_assign']
table.print_table()
table.print_internal_info()

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> # Test depcache access
>>> import wbia
>>> ibs, aid_list = wbia.testdata_aids('testdb1')
>>> depc = ibs.depc_annot
>>> config = {'num_words': 1000, 'nAssign': 1}
>>> #input_tuple = (aid_list, [aid_list] * len(aid_list))
>>> daids = aid_list
>>> input_tuple = (daids, [daids])
>>> rowid_kw = {}
>>> tablename = 'inverted_agg_assign'
>>> target_tablename = tablename
>>> input_ids = depc.get_parent_rowids(tablename, input_tuple, config)
>>> fid_list = ut.take_column(input_ids, 0)
>>> vocab_id_list = ut.take_column(input_ids, 1)
>>> data = depc.get(tablename, input_tuple, config)
>>> tup = data[1]

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.inverted_index import *  # NOQA
>>> import wbia
>>> qreq_ = wbia.testdata_qreq_(defaultdb='Oxford', a='oxford', p='default:proot=smk,nAssign=1,num_words=64000')
>>> config = {'num_words': 64000, 'nAssign': 1, 'int_rvec': True}
>>> depc = qreq_.ibs.depc
>>> daids = qreq_.daids
>>> input_tuple = (daids, [daids])
>>> rowid_kw = {}
>>> tablename = 'inverted_agg_assign'
>>> target_tablename = tablename
>>> input_ids = depc.get_parent_rowids(tablename, input_tuple, config)
>>> fid_list = ut.take_column(input_ids, 0)
>>> vocab_id_list = ut.take_column(input_ids, 1)
wbia.algo.smk.inverted_index.gen_residual_args(vocab, vecs_list, nAssign, int_rvec)[source]
wbia.algo.smk.inverted_index.residual_args(vocab, vecs, nAssign, int_rvec)[source]
wbia.algo.smk.inverted_index.residual_worker(argtup)[source]
wbia.algo.smk.inverted_index.testdata_inva()[source]

from wbia.algo.smk.inverted_index import * # NOQA

wbia.algo.smk.match_chips5 module

TODO: semantic_uuids should be replaced with PCC-like hashes pertaining to annotation clusters if any form of name scoring is used.

class wbia.algo.smk.match_chips5.EstimatorRequest[source]

Bases: utool.util_dev.NiceRepr

property dnids

save dnids in qreq_ state

Type

TODO

ensure_nids()[source]
execute(qaids=None, prog_hook=None, use_cache=True)[source]
property extern_data_config2
property extern_query_config2
get_cfgstr(with_input=False, with_data=True, with_pipe=True, hash_pipe=False)[source]
get_chipmatch_fpaths(qaid_list)[source]

Efficient function to get a list of chipmatch paths

get_data_hashid()[source]
get_nice_parts()[source]
get_pipe_cfgstr()[source]
get_pipe_hashid()[source]
get_qreq_annot_gids(aids)[source]
get_qreq_annot_nids(aids)[source]
get_query_hashid()[source]
property qnids

save qnids in qreq_ state

Type

TODO

shallowcopy(qaids=None)[source]

Creates a copy of qreq with the same qparams object and a subset of the qx and dx objects. Used to generate chunks of vsone and vsmany queries.

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.match_chips5 import *  # NOQA
>>> from wbia.algo.smk.smk_pipeline import testdata_smk
>>> import wbia
>>> wbia, smk, qreq_ = testdata_smk()
>>> qreq2_ = qreq_.shallowcopy(qaids=1)
>>> assert qreq_.daids is qreq2_.daids, 'should be the same'
>>> assert len(qreq_.qaids) != len(qreq2_.qaids), 'should be diff'
>>> #assert qreq_.metadata is not qreq2_.metadata
wbia.algo.smk.match_chips5.execute_and_save(qreq_miss)[source]
wbia.algo.smk.match_chips5.execute_bulk(qreq_)[source]
wbia.algo.smk.match_chips5.execute_singles(qreq_)[source]

wbia.algo.smk.pickle_flann module

class wbia.algo.smk.pickle_flann.PickleFLANN(**kwargs)[source]

Bases: pyflann.index.FLANN

Adds the ability to pickle a flann class on a unix system. (Actually, pickle still won't work because we need the original point data. But we can do a custom dumps and a loads.)

CommandLine:

python -m wbia.algo.smk.pickle_flann PickleFLANN

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.pickle_flann import *  # NOQA
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> data = rng.rand(10, 2)
>>> query = rng.rand(5, 2)
>>> flann = PickleFLANN()
>>> flann.build_index(data, random_seed=42)
>>> index_bytes = flann.dumps()
>>> flann2 = PickleFLANN()
>>> flann2.loads(index_bytes, data)
>>> assert flann2 is not flann
>>> assert flann2.dumps() == index_bytes
>>> idx1 = flann.nn_index(query)[0]
>>> idx2 = flann2.nn_index(query)[0]
>>> assert np.all(idx1 == idx2)
dumps()[source]

# Make a special wordflann pickle
http://www.linuxscrew.com/2010/03/24/fastest-way-to-create-ramdisk-in-ubuntulinux/
sudo mkdir /tmp/ramdisk; chmod 777 /tmp/ramdisk
sudo mount -t tmpfs -o size=256M tmpfs /tmp/ramdisk/
http://zeblog.co/?p=1588

loads(index_bytes, pts)[source]
class wbia.algo.smk.pickle_flann.Win32CompatTempFile(delete=True, verbose=False)[source]

Bases: object

Mimics tempfile.NamedTemporaryFile but allows the file to be closed without being deleted. This lets a second process (like FLANN) read/write to the file on a win32 system. The file is instead deleted after the Win32CompatTempFile object goes out of scope.

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.pickle_flann import *  # NOQA
>>> verbose = True
>>> temp = Win32CompatTempFile(verbose=verbose)
>>> data = '10010'
>>> data = data.encode()
>>> print('data = %r' % (data,))
>>> data1 = temp.read()
>>> print('data1 = %r' % (data1,))
>>> temp.write(data)
>>> data2 = temp.read()
>>> print('data2 = %r' % (data2,))
>>> temp.close()
>>> assert data != data1
>>> assert data == data2
>>> ut.assert_raises(ValueError, temp.close)
>>> assert not ut.checkpath(temp.fpath, verbose=verbose)
close()[source]
property name
read()[source]
write(data)[source]

wbia.algo.smk.script_smk module

Results so far without SV / fanciness, using standard descriptors / vocabulary:

proot=bow,nWords=1E6 -> .594
proot=asmk,nWords=1E6 -> .529

Note

  • Results from SMK Oxford Paper (mAP)

ASMK nAssign=1, SV=False: .78
ASMK nAssign=5, SV=False: .82

Philbin with tf-idf ranking, SV=False: SIFT .636, RootSIFT .683 (+.05)

Philbin with tf-idf ranking, SV=True: SIFT .672, RootSIFT .720 (+.05)

  • My Results (WITH BAD QUERY BBOXES)

smk:nAssign=1,SV=True: .58
smk:nAssign=1,SV=False: .38

Yesterday I got .22 when I fixed the bounding boxes. And now I'm getting .08 and .32 (sv=[F,T]) after deleting and redoing everything (also removing junk images). After the fix of normalization I get .38 and .44.

Using oxford descriptors I get .51ish. Then changing to root-sift, smk-bow gets 0.56294936807700813. Then tfidf-bow2 gets 0.56046968275748565, and asmk gets 0.54146.

Going down to 8K words: smk-BOW gets .153, tfidf-BOW gets .128, smk-asmk gets 0.374.

Ok, the 65K vocab smk-asmk gets mAP=0.461... Ok, after recomputing a new 65K vocab with centered and root-sifted descriptors, using float32 precision (in most places), asmk gets a new mAP score of .5275... :( This is with permissive query kpts and oxford vocab. Next step: ensure everything is float32. Ensured float32: mAP=.5279, ... better but indicative of a real error.

After that try again at Jegou’s data. Ensure there are no smk algo bugs. There must be one.

FINALLY! Got Jegou's data working. With jegou precomputed oxford feats, words, and assignments, and the float32 version: asmk = .78415, bow = .545.

asmk got 0.78415 with the float32 version; bow got .545; bow2 got .551.

vecs07, root_sift, approx assign, (either jegou or my words) mAP=.673

Weird: vecs07, root_sift, exact assign. Maybe jegou words or maybe my words; can't quite tell. Might have messed with a config. mAP=0.68487357885738664

October 8: Still using the same descriptors, but my own vocab with approx assign: mAP = 0.78032.

My own vocab, approx assign, no center: mAP = .793.

The problem was minibatch params. Need higher batch size and init size. Needed to modify sklearn to handle this requirement.

Using my own descriptors I got 0.7460. Seems good.

Now, back to the HS pipeline. Getting a 0.638, so there is an inconsistency. Should be getting .7460. Maybe I gotta root_sift it up?

Turned off root_sift in the script and got .769, so there is a problem in the system script.

minibatch 29566/270340... rate=0.86 Hz, eta=0:00:00, total=9:44:35, wall=05:24 EST
inertia: mean batch=53730.923812, ewa=53853.439903

Now need to try turning off float32.

Differences Between this and SMK:
  • No RootSIFT

  • No SIFT Centering

  • No Independent Vocab

  • Chip RESIZE

Differences between this and VLAD
  • residual vectors are normalized

  • larger default vocabulary size

Feat Info

name     | num_vecs   | n_annots |
---------|------------|----------|
Oxford13 | 12,534,635 |          |
Oxford07 | 16,334,970 |          |
mine1    | 8,997,955  |          |
mine2    | 13,516,721 | 5063     |
mine3    | 8,371,196  | 4728     |
mine4    | 8,482,137  | 4783     |

Cluster Algo Config

name       | algo             | init      | init_size      | batch size |
-----------|------------------|-----------|----------------|------------|
minibatch1 | minibatch kmeans | kmeans++  | num_words * 4  | 100        |
minibatch2 | minibatch kmeans | kmeans++  | num_words * 4  | 1000       |
given13    | Lloyd?           | kmeans++? | num_words * 8? | nan?       |

Assign Algo Config

name   | algo   | trees | checks |
-------|--------|-------|--------|
approx | kdtree | 8     | 1024   |
exact  | linear | nan   | nan    |
exact  | linear | nan   | nan    |

SMK Results

tagid      | mAP   | train_feats | test_feats | center | rootSIFT | assign  | num_words | cluster methods | int | only_xy |
-----------|-------|-------------|------------|--------|----------|---------|-----------|-----------------|-----|---------|
           | 0.38  | mine1       | mine1      |        |          | approx  | 64000     | minibatch1      |     |         |
           | 0.541 | oxford07    | oxford07   |        | X        | approx  | 2 ** 16   | minibatch1      |     | X       |
           | 0.673 | oxford13    | oxford13   | X      | X        | approx  | 2 ** 16   | minibatch1      |     | X       |
           | 0.684 | oxford13    | oxford13   | X      | X        | exact   | 2 ** 16   | minibatch1      |     | X       |
mybest     | 0.793 | oxford13    | oxford13   |        | X        | approx  | 2 ** 16   | minibatch2      |     | X       |
           | 0.780 | oxford13    | oxford13   | X      | X        | approx  | 2 ** 16   | minibatch2      |     | X       |
           | 0.788 | paras13     | oxford13   | X      | X        | approx  | 2 ** 16   | given13         |     | X       |
allgiven   | 0.784 | paras13     | oxford13   | X      | X        | given13 | 2 ** 16   | given13         |     | X       |
reported13 | 0.781 | paras13     | oxford13   | X      | X        | given13 | 2 ** 16   | given13         |     | X       |
inhouse1   | 0.746 | mine2       | mine2      |        | X        | approx  | 2 ** 16   | minibatch2      |     | X       |
inhouse2   | 0.769 | mine2       | mine2      |        |          | approx  | 2 ** 16   | minibatch2      |     | X       |
inhouse3   | 0.769 | mine2       | mine2      |        |          | approx  | 2 ** 16   | minibatch2      | X   | X       |
inhouse4   | 0.751 | mine2       | mine2      |        |          | approx  | 2 ** 16   | minibatch2      | X   |         |
sysharn1   | 0.638 | mine3       | mine3      |        |          | approx  | 64000     | minibatch2      | X   |         |
sysharn2   | 0.713 | mine3       | mine4      |        |          | approx  | 64000     | minibatch2      | X   |         |

In the SMK paper they report 0.781 as shown in the table, but they also report a score of 0.820 when increasing the number of features from 12.5M to 19.2M by lowering feature detection thresholds.

class wbia.algo.smk.script_smk.SMK(wx_to_weight, method='asmk', **kwargs)[source]

Bases: utool.util_dev.NiceRepr

gamma(X)[source]

Compute gamma of X

gamma(X) = (M(X, X)) ** (-1/2)

kernel_bow_tfidf(X, Y)[source]
kernel_smk(X, Y)[source]
match_score_agg(X, Y)[source]
match_score_bow(X, Y)[source]
match_score_sep(X, Y)[source]
word_isect(X, Y)[source]
class wbia.algo.smk.script_smk.SparseVector(_dict)[source]

Bases: utool.util_dev.NiceRepr

dot(other)[source]
wbia.algo.smk.script_smk.bow_vector(X, wx_to_weight, nwords)[source]
wbia.algo.smk.script_smk.check_image_sizes(data_uri_order, all_kpts, offset_list)[source]

Check if any keypoints go out of bounds wrt their associated images

wbia.algo.smk.script_smk.compare_data(Y_list_)[source]
wbia.algo.smk.script_smk.ensure_tf(X)[source]
wbia.algo.smk.script_smk.get_annots_imgid(_annots)[source]
wbia.algo.smk.script_smk.hyrule_vocab_test()[source]
wbia.algo.smk.script_smk.kpts_inside_bbox(kpts, bbox, only_xy=False)[source]
wbia.algo.smk.script_smk.load_internal_data()[source]
wbia TestResult --db Oxford -p smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None -a oxford --dev-mode

wbia TestResult --db GZ_Master1 -p smk:nWords=[64000],nAssign=[1],SV=[False],fg_on=False -a ctrl:qmingt=2 --dev-mode

wbia.algo.smk.script_smk.load_ordered_annots(data_uri_order, query_uri_order)[source]
wbia.algo.smk.script_smk.load_oxford_2007()[source]

Loads data from http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf

>>> from wbia.algo.smk.script_smk import *  # NOQA
wbia.algo.smk.script_smk.load_oxford_2013()[source]

Found this data in the README of the SMK publication https://hal.inria.fr/hal-00864684/document http://people.rennes.inria.fr/Herve.Jegou/publications.html which includes a download script

CommandLine:

# Download oxford13 data
cd ~/work/Oxford
mkdir -p smk_data_iccv_2013
cd smk_data_iccv_2013
wget -nH --cut-dirs=4 -r -Pdata/ ftp://ftp.irisa.fr/local/texmex/corpus/iccv2013/

This dataset has 5063 images whereas 07 has 5062. This dataset seems to contain an extra junk image:

ashmolean_000214

# Remember that matlab is 1-indexed!
# DON'T FORGET TO CONVERT TO 0 INDEXING!

wbia.algo.smk.script_smk.load_oxford_wbia()[source]
wbia.algo.smk.script_smk.make_agg_vecs(X, words, fx_to_vecs)[source]
wbia.algo.smk.script_smk.make_temporary_annot(aid, vocab, wx_to_weight, ibs, config)[source]
wbia.algo.smk.script_smk.new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)[source]
wbia.algo.smk.script_smk.oxford_conic_test()[source]
wbia.algo.smk.script_smk.run_asmk_script()[source]
wbia.algo.smk.script_smk.sanity_checks(offset_list, Y_list, query_annots, ibs)[source]
wbia.algo.smk.script_smk.show_data_image(data_uri_order, i, offset_list, all_kpts, all_vecs)[source]

i = 12

wbia.algo.smk.script_smk.verify_score()[source]

Recompute all SMK things for two annotations and compare scores.

>>> from wbia.algo.smk.script_smk import *  # NOQA

cm.print_inspect_str(qreq_)
cm.show_single_annotmatch(qreq_, daid1)
cm.show_single_annotmatch(qreq_, daid2)

wbia.algo.smk.smk_funcs module

References

Jegou’s Source Code, Data, and Publications http://people.rennes.inria.fr/Herve.Jegou/publications.html

To aggregate or not to aggregate: selective match kernels for image search https://hal.inria.fr/hal-00864684/document

Image search with selective match kernels: aggregation across single and multiple images http://image.ntua.gr/iva/files/Tolias_ijcv15_iasmk.pdf

Negative evidences and co-occurrences in image retrieval: the benefit of PCA and whitening https://hal.inria.fr/file/index/docid/722626/filename/jegou_chum_eccv2012.pdf

Revisiting the VLAD image representation https://hal.inria.fr/file/index/docid/850249/filename/nextvlad_hal.pdf

Aggregating local descriptors into a compact image representation https://lear.inrialpes.fr/pubs/2010/JDSP10/jegou_compactimagerepresentation.pdf

Large-scale image retrieval with compressed Fisher vectors http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.401.9140&rep=rep1&type=pdf

Improving Bag of Features http://lear.inrialpes.fr/pubs/2010/JDS10a/jegou_improvingbof_preprint.pdf

Lost in Quantization http://www.robots.ox.ac.uk/~vgg/publications/papers/philbin08.ps.gz

A Context Dissimilarity Measure for Accurate and Efficient Image Search https://lear.inrialpes.fr/pubs/2007/JHS07/jegou_cdm.pdf

Video Google: A text retrieval approach to object matching in videos http://www.robots.ox.ac.uk/~vgg/publications/papers/sivic03.pdf

Hamming embedding and weak geometric consistency for large scale image search https://lear.inrialpes.fr/pubs/2008/JDS08/jegou_hewgc08.pdf

Three things everyone should know to improve object retrieval https://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf

Object retrieval with large vocabularies and fast spatial matching http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf

Aggregating Local Descriptors into Compact Codes https://hal.inria.fr/file/index/docid/633013/filename/jegou_aggregate.pdf

Local visual query expansion https://hal.inria.fr/hal-00840721/PDF/RR-8325.pdf

Root SIFT technique https://hal.inria.fr/hal-00688169/document

Fisher Kernel For Large Scale Classification https://www.robots.ox.ac.uk/~vgg/rg/papers/peronnin_etal_ECCV10.pdf

Orientation covariant aggregation of local descriptors with embeddings https://arxiv.org/pdf/1407.2170.pdf

wbia.algo.smk.smk_funcs.aggregate_rvecs(rvecs, maws, error_flags)[source]

Compute aggregated residual vectors Phi(X_c)

CommandLine:

python -m wbia.algo.smk.smk_funcs aggregate_rvecs --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> vecs, words = ut.take(testdata_rvecs(), ['vecs', 'words'])
>>> word = words[-1]
>>> rvecs, error_flags = compute_rvec(vecs, word)
>>> maws = [1.0] * len(rvecs)
>>> agg_rvec, agg_flag = aggregate_rvecs(rvecs, maws, error_flags)
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.qt4ensure()
>>> pt.figure()
>>> # recenter residuals for visualization
>>> agg_cvec = agg_rvec + word
>>> cvecs = (rvecs + word[None, :])
>>> pt.plot(word[0], word[1], 'r*', markersize=12, label='word')
>>> pt.plot(agg_cvec[0], agg_cvec[1], 'ro', label='re-centered agg_rvec')
>>> pt.plot(vecs.T[0], vecs.T[1], 'go', label='original vecs')
>>> pt.plot(cvecs.T[0], cvecs.T[1], 'b.', label='re-centered rvec')
>>> pt.draw_line_segments2([word] * len(cvecs), cvecs, alpha=.5, color='black')
>>> pt.draw_line_segments2([word], [agg_cvec], alpha=.5, color='red')
>>> pt.gca().set_aspect('equal')
>>> pt.legend()
>>> ut.show_if_requested()
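
Conceptually, the aggregation is a (weighted) sum of the residuals assigned to one word, followed by re-normalization. A minimal numpy sketch of that idea, not the library implementation (the zero-norm error flag is an assumption based on the error_flags output above):

>>> import numpy as np
>>> def aggregate_rvecs_sketch(rvecs, maws):
>>>     # weighted sum of the per-feature residuals for one word
>>>     agg = (np.asarray(maws)[:, None] * rvecs).sum(axis=0)
>>>     norm = np.linalg.norm(agg)
>>>     # residuals can cancel out; flag that degenerate case
>>>     error_flag = (norm == 0)
>>>     if not error_flag:
>>>         agg = agg / norm
>>>     return agg, error_flag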
wbia.algo.smk.smk_funcs.assign_to_words(vocab, idx_to_vec, nAssign, massign_alpha=1.2, massign_sigma=80.0, massign_equal_weights=False, verbose=None)[source]

Assigns descriptor-vectors to nearest word.

Notes

Maybe move out of this file? The usage of vocab is outside this file's scope.

Parameters
  • wordflann (FLANN) – nearest neighbor index over words

  • words (ndarray) – vocabulary words

  • idx_to_vec (ndarray) – descriptors to assign

  • nAssign (int) – number of words to assign each descriptor to

  • massign_alpha (float) – multiple-assignment ratio threshold

  • massign_sigma (float) – multiple-assignment gaussian variance

  • massign_equal_weights (bool) – assign equal weight to all multiassigned words

Returns

inverted index, multi-assigned weights, and forward index formatted as:

  • wx_to_idxs - word index -> vector indexes

  • wx_to_maws - word index -> multi-assignment weights

  • idx2_wxs - vector index -> assigned word indexes

Return type

tuple

Example

>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> idx_to_vec = depc.d.get_feat_vecs(aid_list)[0][0::300]
>>> idx_to_vec = np.vstack((idx_to_vec, vocab.wx_to_word[0]))
>>> nAssign = 2
>>> massign_equal_weights = False
>>> massign_alpha = 1.2
>>> massign_sigma = 80.0
>>> nAssign = 2
>>> idx_to_wxs, idx_to_maws = assign_to_words(vocab, idx_to_vec, nAssign)
>>> print('idx_to_maws = %s' % (ut.repr2(idx_to_wxs, precision=2),))
>>> print('idx_to_wxs = %s' % (ut.repr2(idx_to_maws, precision=2),))
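
For intuition only, a brute-force numpy sketch of the nAssign-nearest-word search (the real implementation queries the vocab's FLANN index and then applies weight_multi_assigns to the resulting distances):

>>> import numpy as np
>>> def assign_sketch(words, idx_to_vec, nAssign):
>>>     # squared L2 distance from each descriptor to each word
>>>     diff = idx_to_vec[:, None, :].astype(np.float64) - words[None, :, :]
>>>     dists = (diff ** 2).sum(axis=2)
>>>     # take the nAssign closest words per descriptor
>>>     idx_to_wxs = dists.argsort(axis=1)[:, :nAssign]
>>>     idx_to_wdist = np.take_along_axis(dists, idx_to_wxs, axis=1)
>>>     return idx_to_wxs, idx_to_wdist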
wbia.algo.smk.smk_funcs.build_matches_agg(X_fxs, Y_fxs, X_maws, Y_maws, score_list)[source]

Builds explicit feature matches. Breaks up and distributes each aggregate score amongst its contributing features.

Returns

(fm, fs)

Return type

tuple

CommandLine:

python -m wbia.algo.smk.smk_funcs build_matches_agg --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> map_int = ut.partial(ut.lmap, ut.partial(np.array, dtype=np.int32))
>>> map_float = ut.partial(ut.lmap, ut.partial(np.array, dtype=np.float32))
>>> X_fxs = map_int([[0, 1], [2, 3, 4], [5]])
>>> Y_fxs = map_int([[8], [0, 4], [99]])
>>> X_maws = map_float([[1, 1], [1, 1, 1], [1]])
>>> Y_maws = map_float([[1], [1, 1], [1]])
>>> score_list = np.array([1, 2, 3], dtype=np.float32)
>>> (fm, fs) = build_matches_agg(X_fxs, Y_fxs, X_maws, Y_maws, score_list)
>>> print('fm = ' + ut.repr2(fm))
>>> print('fs = ' + ut.repr2(fs))
>>> assert len(fm) == len(fs)
>>> assert score_list.sum() == fs.sum()
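
The distribution step amounts to expanding each per-word score over the cross product of contributing features. A simplified sketch (it ignores the maws weighting, which the real function folds into the split):

>>> import numpy as np
>>> def distribute_scores_sketch(X_fxs, Y_fxs, score_list):
>>>     fm, fs = [], []
>>>     for fxs1, fxs2, score in zip(X_fxs, Y_fxs, score_list):
>>>         # every pairing of contributing features shares the score
>>>         pairs = [(fx1, fx2) for fx1 in fxs1 for fx2 in fxs2]
>>>         fm.extend(pairs)
>>>         fs.extend([score / len(pairs)] * len(pairs))
>>>     # fs.sum() equals score_list.sum(), as the doctest asserts
>>>     return np.array(fm), np.array(fs)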
wbia.algo.smk.smk_funcs.build_matches_sep(X_fxs, Y_fxs, scores_list)[source]

Just build matches. Scores have already been broken up. No need to do that.

Returns

(fm, fs)

Return type

tuple

CommandLine:

python -m wbia.algo.smk.smk_funcs build_matches_sep --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> map_int = ut.partial(ut.lmap, ut.partial(np.array, dtype=np.int32))
>>> map_float = ut.partial(ut.lmap, ut.partial(np.array, dtype=np.float32))
>>> X_fxs = map_int([[0, 1], [2, 3, 4], [5]])
>>> Y_fxs = map_int([[8], [0, 4], [99]])
>>> scores_list = map_float([
>>>     [[.1], [.2],],
>>>     [[.3, .4], [.4, .6], [.5, .9],],
>>>     [[.4]],
>>> ])
>>> (fm, fs) = build_matches_sep(X_fxs, Y_fxs, scores_list)
>>> print('fm = ' + ut.repr2(fm))
>>> print('fs = ' + ut.repr2(fs))
>>> assert len(fm) == len(fs)
>>> assert np.isclose(np.sum(ut.total_flatten(scores_list)), fs.sum())
wbia.algo.smk.smk_funcs.cast_residual_integer(rvecs)[source]

Quantize residual vectors to 8 bits using the same truncation hack as in SIFT. Values will typically not reach the maximum, so we can multiply by a higher number for better fidelity.

Parameters

rvecs (ndarray[float64_t]) –

Returns

Return type

ndarray[uint8_t]

CommandLine:

python -m wbia.algo.smk.smk_funcs cast_residual_integer --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> rvecs = testdata_rvecs(dim=128)['rvecs'][4:]
>>> rvecs_int8 = cast_residual_integer(rvecs)
>>> rvecs_float = uncast_residual_integer(rvecs_int8)
>>> # Casting from float to int8 will result in a max quantization error
>>> measured_error = np.abs(rvecs_float - rvecs)
>>> # But there are limits on what this error can be
>>> cutoff = 127  # np.iinfo(np.int8).max
>>> fidelity = 255.0
>>> theory_error_in = 1 / fidelity
>>> theory_error_out = (fidelity - cutoff) / fidelity
>>> # Determine if any component values exceed the cutoff
>>> is_inside = (np.abs(rvecs * fidelity) < cutoff)
>>> # Check theoretical maximum for values inside and outside cutoff
>>> error_stats_in = ut.get_stats(measured_error[is_inside])
>>> error_stats_out = ut.get_stats(measured_error[~is_inside])
>>> print('inside cutoff error stats: ' + ut.repr4(error_stats_in, precision=8))
>>> print('outside cutoff error stats: ' + ut.repr4(error_stats_out, precision=8))
>>> assert rvecs_int8.dtype == np.int8
>>> assert np.all(measured_error[is_inside] < theory_error_in)
>>> assert np.all(measured_error[~is_inside] < theory_error_out)
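
A hedged sketch of the truncation trick: scale unit-range components by 255 for fidelity, then clip into the int8 range (the constants follow the doctest above; the library's exact rounding may differ):

>>> import numpy as np
>>> def cast_sketch(rvecs, fidelity=255.0, cutoff=127):
>>>     # scale up, then truncate anything past the int8 cutoff
>>>     return np.clip(rvecs * fidelity, -cutoff, cutoff).astype(np.int8)
>>> def uncast_sketch(rvecs_int8, fidelity=255.0):
>>>     return rvecs_int8.astype(np.float64) / fidelity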
wbia.algo.smk.smk_funcs.compute_rvec(vecs, word)[source]

Compute residual vectors phi(x_c)

Subtract each vector from its quantized word to get the residual, then normalize residuals to unit length.

CommandLine:

python -m wbia.algo.smk.smk_funcs compute_rvec --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> vecs, words = ut.take(testdata_rvecs(), ['vecs', 'words'])
>>> word = words[-1]
>>> rvecs, error_flags = compute_rvec(vecs, word)
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.figure()
>>> # recenter residuals for visualization
>>> cvecs = (rvecs + word[None, :])
>>> pt.plot(word[0], word[1], 'r*', markersize=12, label='word')
>>> pt.plot(vecs.T[0], vecs.T[1], 'go', label='original vecs')
>>> pt.plot(cvecs.T[0], cvecs.T[1], 'b.', label='re-centered rvec')
>>> pt.draw_line_segments2(cvecs, [word] * len(cvecs), alpha=.5, color='black')
>>> pt.gca().set_aspect('equal')
>>> pt.legend()
>>> ut.show_if_requested()
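
The core of the computation is a subtraction and a re-normalization; a minimal sketch consistent with the description above (the subtraction direction and the zero-residual flag are assumptions, not the library source):

>>> import numpy as np
>>> def compute_rvec_sketch(vecs, word):
>>>     # residual of each vector from its assigned word
>>>     rvecs = word[None, :] - vecs.astype(np.float64)
>>>     norms = np.linalg.norm(rvecs, axis=1, keepdims=True)
>>>     # a vector identical to its word has an undefined direction
>>>     error_flags = (norms == 0).ravel()
>>>     rvecs = np.divide(rvecs, norms, out=np.zeros_like(rvecs),
>>>                       where=norms > 0)
>>>     return rvecs, error_flags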
wbia.algo.smk.smk_funcs.compute_stacked_agg_rvecs(words, flat_wxs_assign, flat_vecs, flat_offsets)[source]

More efficient version of agg on a stacked structure

Parameters
  • words (ndarray) – entire vocabulary of words

  • flat_wxs_assign (ndarray) – maps a stacked index to word index

  • flat_vecs (ndarray) – stacked SIFT descriptors

  • flat_offsets (ndarray) – offset positions per annotation

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> data = testdata_rvecs(dim=2, nvecs=1000, nannots=10)
>>> words = data['words']
>>> flat_offsets = data['offset_list']
>>> flat_wxs_assign, flat_vecs = ut.take(data, ['idx_to_wx', 'vecs'])
>>> tup = compute_stacked_agg_rvecs(words, flat_wxs_assign, flat_vecs, flat_offsets)
>>> all_agg_vecs, all_error_flags, agg_offset_list = tup
>>> agg_rvecs_list = [all_agg_vecs[l:r] for l, r in ut.itertwo(agg_offset_list)]
>>> agg_flags_list = [all_error_flags[l:r] for l, r in ut.itertwo(agg_offset_list)]
>>> assert len(agg_flags_list) == len(flat_offsets) - 1

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> data = testdata_rvecs(dim=2, nvecs=100, nannots=5)
>>> words = data['words']
>>> flat_offsets = data['offset_list']
>>> flat_wxs_assign, flat_vecs = ut.take(data, ['idx_to_wx', 'vecs'])
>>> tup = compute_stacked_agg_rvecs(words, flat_wxs_assign, flat_vecs, flat_offsets)
>>> all_agg_vecs, all_error_flags, agg_offset_list = tup
>>> agg_rvecs_list = [all_agg_vecs[l:r] for l, r in ut.itertwo(agg_offset_list)]
>>> agg_flags_list = [all_error_flags[l:r] for l, r in ut.itertwo(agg_offset_list)]
>>> assert len(agg_flags_list) == len(flat_offsets) - 1
wbia.algo.smk.smk_funcs.gamma_agg(phisX, flagsX, weight_list, alpha, thresh)[source]

Computes gamma (the self-consistency criterion). It is a scalar which ensures K(X, X) = 1.

Returns

sccw self-consistency-criterion weight

Return type

float

Math:

gamma(X) = (sum_{c in C} w_c M(X_c, X_c))^{-.5}

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> ibs, smk, qreq_= testdata_smk()
>>> X = qreq_.qinva.grouped_annots[0]
>>> wx_to_weight = qreq_.wx_to_weight
>>> print('X.gamma = %r' % (gamma(X),))
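
In other words, gamma is the inverse square root of the weighted self-similarity, so that K(X, X) = gamma(X)^2 * sum_c w_c M(X_c, X_c) = 1. A sketch assuming the per-word self-match scores have already been computed (e.g. via match_scores_agg of phisX against itself):

>>> import numpy as np
>>> def gamma_sketch(self_scores, weight_list):
>>>     # gamma(X) = (sum_c w_c M(X_c, X_c)) ** -0.5
>>>     total = (np.asarray(weight_list) * np.asarray(self_scores)).sum()
>>>     return 1.0 / np.sqrt(total)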
wbia.algo.smk.smk_funcs.gamma_sep(phisX_list, flagsX_list, weight_list, alpha, thresh)[source]
wbia.algo.smk.smk_funcs.inv_doc_freq(ndocs_total, ndocs_per_word)[source]
Parameters
  • ndocs_total (int) – number of unique documents

  • ndocs_per_word (ndarray) – ndocs_per_word[i] should correspond to the number of unique documents containing word[i]

Returns

idf_per_word

Return type

ndarray

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> ndocs_total = 21
>>> ndocs_per_word = [0, 21, 20, 2, 15, 8, 12, 1, 2]
>>> idf_per_word = inv_doc_freq(ndocs_total, ndocs_per_word)
>>> result = '%s' % (ut.repr2(idf_per_word, precision=2),)
>>> print(result)
np.array([0.  , 0.  , 0.05, 2.35, 0.34, 0.97, 0.56, 3.04, 2.35])
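
The doctest output is consistent with the standard formula idf_w = ln(ndocs_total / ndocs_per_word[w]), with words that occur in zero documents given weight 0; a sketch under that assumption:

>>> import numpy as np
>>> def inv_doc_freq_sketch(ndocs_total, ndocs_per_word):
>>>     n = np.asarray(ndocs_per_word, dtype=np.float64)
>>>     idf = np.zeros_like(n)
>>>     nonzero = n > 0
>>>     # ln(N / n_w); zero counts stay zero to avoid divide-by-zero
>>>     idf[nonzero] = np.log(ndocs_total / n[nonzero])
>>>     return idf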
wbia.algo.smk.smk_funcs.invert_assigns(idx_to_wxs, idx_to_maws, verbose=False)[source]

Inverts the assignment of vectors-to-words into words-to-vectors. Inverts the mapping by grouping on word indexes.

This gives a HUGE speedup over the old invert_assigns

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> idx_to_wxs = np.ma.array([
>>>     (0, 4),
>>>     (2, -1),
>>>     (2, 0)], dtype=np.int32)
>>> idx_to_wxs[1, 1] = np.ma.masked
>>> idx_to_maws = np.ma.array(
>>>     [(.5, 1.), (1., np.nan), (.5, .5)], dtype=np.float32)
>>> idx_to_maws[1, 1] = np.ma.masked
>>> tup = invert_assigns(idx_to_wxs, idx_to_maws)
>>> wx_to_idxs, wx_to_maws = tup
>>> result = 'wx_to_idxs = %s' % (ut.repr4(wx_to_idxs, with_dtype=True),)
>>> result += '\nwx_to_maws = %s' % (ut.repr4(wx_to_maws, with_dtype=True),)
>>> print(result)
wx_to_idxs = {
    0: np.array([0, 2], dtype=np.int32),
    2: np.array([1, 2], dtype=np.int32),
    4: np.array([0], dtype=np.int32),
}
wx_to_maws = {
    0: np.array([0.5, 0.5], dtype=np.float32),
    2: np.array([1. , 0.5], dtype=np.float32),
    4: np.array([1.], dtype=np.float32),
}
wbia.algo.smk.smk_funcs.invert_assigns_old(idx_to_wxs, idx_to_maws, verbose=False)[source]

Inverts assignment of vectors to words into words to vectors.

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> idx_to_wxs = [
>>>     np.array([0, 4], dtype=np.int32),
>>>     np.array([2], dtype=np.int32),
>>>     np.array([2, 0], dtype=np.int32),
>>> ]
>>> idx_to_maws = [
>>>     np.array([ 0.5,  0.5], dtype=np.float32),
>>>     np.array([ 1.], dtype=np.float32),
>>>     np.array([ 0.5,  0.5], dtype=np.float32),
>>> ]
>>> wx_to_idxs, wx_to_maws = invert_assigns_old(idx_to_wxs, idx_to_maws)
>>> result = 'wx_to_idxs = %s' % (ut.repr4(wx_to_idxs, with_dtype=True),)
>>> result += '\nwx_to_maws = %s' % (ut.repr4(wx_to_maws, with_dtype=True),)
>>> print(result)
wx_to_idxs = {
    0: np.array([0, 2], dtype=np.int32),
    2: np.array([1, 2], dtype=np.int32),
    4: np.array([0], dtype=np.int32),
}
wx_to_maws = {
    0: np.array([0.5, 0.5], dtype=np.float32),
    2: np.array([1. , 0.5], dtype=np.float32),
    4: np.array([0.5], dtype=np.float32),
}
wbia.algo.smk.smk_funcs.invert_lists(aids, wx_lists, all_wxs=None)[source]

takes corresponding lists of (aids, wxs) and maps wxs to aids

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> aids = [1, 2, 3]
>>> wx_lists = [[0, 1], [20, 0, 1], [3]]
>>> wx_to_aids = invert_lists(aids, wx_lists)
>>> result = ('wx_to_aids = %s' % (ut.repr2(wx_to_aids),))
>>> print(result)
wx_to_aids = {0: [1, 2], 1: [1, 2], 3: [3], 20: [2]}
wbia.algo.smk.smk_funcs.match_scores_agg(PhisX, PhisY, flagsX, flagsY, alpha, thresh)[source]

Scores matches to multiple words using aggregate residual vectors

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> PhisX = np.array([[ 0.        ,  0.        ],
>>>                   [-1.        ,  0.        ],
>>>                   [ 0.85085751,  0.52539652],
>>>                   [-0.89795083, -0.4400958 ],
>>>                   [-0.99934547,  0.03617512]])
>>> PhisY = np.array([[ 0.88299408, -0.46938411],
>>>                   [-0.12096522, -0.99265675],
>>>                   [-0.99948266, -0.03216222],
>>>                   [-0.08394916, -0.99647004],
>>>                   [-0.96414952, -0.26535957]])
>>> flagsX = np.array([True, False, False, True, False])[:, None]
>>> flagsY = np.array([False, False, False, True, False])[:, None]
>>> alpha = 3.0
>>> thresh = 0.0
>>> score_list = match_scores_agg(PhisX, PhisY, flagsX, flagsY, alpha, thresh)
>>> result = 'score_list = ' + ut.repr2(score_list, precision=4)
>>> print(result)
score_list = np.array([1.    , 0.0018, 0.    , 1.    , 0.868 ])
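
Judging from the doctest, the per-word score is the selectivity function applied to the dot product of corresponding aggregated residuals, with flagged (degenerate) residual pairs scored as perfect matches; a sketch under that reading:

>>> import numpy as np
>>> def match_scores_agg_sketch(PhisX, PhisY, flagsX, flagsY, alpha, thresh):
>>>     # similarity of corresponding aggregated residual vectors
>>>     u = (PhisX * PhisY).sum(axis=1)
>>>     score = np.sign(u) * np.abs(u) ** alpha
>>>     score[u <= thresh] = 0
>>>     # flagged residuals (vector == word) count as perfect matches
>>>     score[(flagsX | flagsY).ravel()] = 1.0
>>>     return score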
wbia.algo.smk.smk_funcs.match_scores_sep(phisX_list, phisY_list, flagsX_list, flagsY_list, alpha, thresh)[source]

Scores matches to multiple words using lists of separated residual vectors

wbia.algo.smk.smk_funcs.sccw_normalize(scores, weight_list)[source]
wbia.algo.smk.smk_funcs.selective_match_score(phisX, phisY, flagsX, flagsY, alpha, thresh)[source]

computes the score of each feature match

wbia.algo.smk.smk_funcs.selectivity(u, alpha=3.0, thresh=0.0, out=None)[source]

The selectivity function thresholds and applies a power law.

This downweights weak matches. The following is the exact definition from the SMK paper:

sigma_alpha(u) = sign(u) * (u ** alpha) if u > thresh else 0

Parameters
  • u (ndarray) – input score between (-1, +1)

  • alpha (float) – power law (default = 3.0)

  • thresh (float) – number between 0 and 1 (default = 0.0)

  • out (None) – inplace output (default = None)

Returns

score

Return type

float

CommandLine:
python -m wbia.plottool plot_func --show --range=-1,1 --setup="import wbia" --func wbia.algo.smk.smk_funcs.selectivity "lambda u: sign(u) * abs(u)**3.0 * greater_equal(u, 0)"

python -m wbia.algo.smk.smk_funcs selectivity --show

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> u = np.array([-1.0, -.5, -.1, 0, .1, .5, 1.0])
>>> alpha = 3.0
>>> thresh = 0
>>> score = selectivity(u, alpha, thresh)
>>> result = ut.repr2(score.tolist(), precision=4)
>>> print(result)
[0.0000, 0.0000, 0.0000, 0.0000, 0.0010, 0.1250, 1.0000]
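
The whole function reduces to the lambda shown in the CommandLine above; a one-line numpy sketch that reproduces the doctest output:

>>> import numpy as np
>>> def selectivity_sketch(u, alpha=3.0, thresh=0.0):
>>>     u = np.asarray(u)
>>>     # power law on the magnitude, zeroed at or below the threshold
>>>     return np.sign(u) * np.abs(u) ** alpha * (u > thresh)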
wbia.algo.smk.smk_funcs.testdata_rvecs(dim=2, nvecs=13, nwords=5, nannots=4)[source]

two dimensional test data

CommandLine:

python -m wbia.algo.smk.smk_funcs testdata_rvecs --show

Ignore:

dim = 2
nvecs = 13
nwords = 5
nannots = 5

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> data = testdata_rvecs()
>>> ut.quit_if_noshow()
>>> exec(ut.execstr_dict(data))
>>> import wbia.plottool as pt
>>> from scipy.spatial import Voronoi, voronoi_plot_2d
>>> pt.qt4ensure()
>>> fig = pt.figure()
>>> vor = Voronoi(words)
>>> pt.plot(words.T[0], words.T[1], 'r*', label='words')
>>> pt.plot(vecs.T[0], vecs.T[1], 'b.', label='vecs')
>>> # lines showing assignments (and residuals)
>>> pts1 = vecs
>>> pts2 = words[idx_to_wx.T[0]]
>>> pt.draw_line_segments2(pts1, pts2)
>>> pt.plot(vecs.T[0], vecs.T[1], 'g.', label='vecs')
>>> voronoi_plot_2d(vor, show_vertices=False, ax=pt.gca())
>>> extents = vt.get_pointset_extents(np.vstack((vecs, words)))
>>> extents = vt.scale_extents(extents, 1.1)
>>> ax = pt.gca()
>>> ax.set_aspect('equal')
>>> ax.set_xlim(*extents[0:2])
>>> ax.set_ylim(*extents[2:4])
>>> ut.show_if_requested()
wbia.algo.smk.smk_funcs.uncast_residual_integer(rvecs)[source]
Parameters

rvecs (ndarray[uint8_t]) –

Returns

Return type

ndarray[float64_t]

wbia.algo.smk.smk_funcs.weight_multi_assigns(_idx_to_wx, _idx_to_wdist, massign_alpha=1.2, massign_sigma=80.0, massign_equal_weights=False)[source]

Multi Assignment Weight Filtering from Improving Bag of Features

Parameters

massign_equal_weights (bool) – Turns off soft weighting; gives all assigned vectors weight 1

Returns

(idx_to_wxs, idx_to_maws)

Return type

tuple

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> _idx_to_wx = np.array([[0, 1], [2, 3], [4, 5], [2, 0]])
>>> _idx_to_wdist = np.array([[.1, .11], [.2, .25], [.03, .25], [0, 1]])
>>> massign_alpha = 1.2
>>> massign_sigma = 80.0
>>> massign_equal_weights = False
>>> idx_to_wxs, idx_to_maws = weight_multi_assigns(
>>>     _idx_to_wx, _idx_to_wdist, massign_alpha, massign_sigma,
>>>     massign_equal_weights)
>>> result = 'idx_to_wxs = %s' % (ut.repr2(idx_to_wxs.astype(np.float64)),)
>>> result += '\nidx_to_maws = %s' % (ut.repr2(idx_to_maws, precision=2),)
>>> print(result)
idx_to_wxs = np.ma.MaskedArray([[0., 1.],
                   [2., inf],
                   [4., inf],
                   [2., 0.]])
idx_to_maws = np.ma.MaskedArray([[0.5, 0.5],
                   [1. , inf],
                   [1. , inf],
                   [0.5, 0.5]])

Example

>>> # ENABLE_DOCTEST
>>> from wbia.algo.smk.smk_funcs import *  # NOQA
>>> _idx_to_wx = np.array([[0, 1], [2, 3], [4, 5], [2, 0]])
>>> _idx_to_wdist = np.array([[.1, .11], [.2, .25], [.03, .25], [0, 1]])
>>> _idx_to_wx = _idx_to_wx.astype(np.int32)
>>> _idx_to_wdist = _idx_to_wdist.astype(np.float32)
>>> massign_alpha = 1.2
>>> massign_sigma = 80.0
>>> massign_equal_weights = True
>>> idx_to_wxs, idx_to_maws = weight_multi_assigns(
>>>     _idx_to_wx, _idx_to_wdist, massign_alpha, massign_sigma,
>>>     massign_equal_weights)
>>> result = 'idx_to_wxs = %s' % (ut.repr2(idx_to_wxs.astype(np.float64)),)
>>> result += '\nidx_to_maws = %s' % (ut.repr2(idx_to_maws, precision=2),)
>>> print(result)
idx_to_wxs = np.ma.MaskedArray([[0., 1.],
                   [2., inf],
                   [4., inf],
                   [2., 0.]])
idx_to_maws = np.ma.MaskedArray([[1., 1.],
                   [1., inf],
                   [1., inf],
                   [1., 1.]])

wbia.algo.smk.smk_pipeline module

Oxford Experiment:

wbia TestResult --db Oxford -p smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True -a oxford

Zebra Experiment:
python -m wbia draw_rank_cmc --db GZ_Master1 --show
    -p :proot=smk,num_words=[64000],fg_on=False,nAssign=[1],SV=[False]
       :proot=vsmany,fg_on=False,SV=[False]
    -a ctrl:qmingt=2

python -m wbia draw_rank_cmc --db PZ_Master1 --show
    -p :proot=smk,num_words=[64000],fg_on=False,nAssign=[1],SV=[False]
       :proot=vsmany,fg_on=False,SV=[False]
    -a ctrl:qmingt=2

class wbia.algo.smk.smk_pipeline.MatchHeuristicsConfig(**kwargs)[source]

Bases: wbia.dtool.base.Config

class wbia.algo.smk.smk_pipeline.SMK[source]

Bases: utool.util_dev.NiceRepr

Harness class that controls the execution of the SMK algorithm

K(X, Y) = gamma(X) * gamma(Y) * sum([Mc(Xc, Yc) for c in words])
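
A schematic sketch of that kernel (hypothetical names: Mc stands in for a per-word match kernel such as match_scores_agg, and the gamma attributes for precomputed self-consistency weights):

>>> # schematic only; X and Y are assumed to expose per-word data and
>>> # a precomputed gamma, and wx_to_weight holds the idf weights
>>> def smk_kernel_sketch(X, Y, wx_to_weight, Mc):
>>>     common = set(X.words) & set(Y.words)
>>>     total = sum(wx_to_weight[c] * Mc(X, Y, c) for c in common)
>>>     return X.gamma * Y.gamma * total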

match_single(qaid, daids, qreq_, verbose=True)[source]
CommandLine:

python -m wbia.algo.smk.smk_pipeline SMK.match_single --profile
python -m wbia.algo.smk.smk_pipeline SMK.match_single --show

python -m wbia SMK.match_single -a ctrl:qmingt=2 --profile --db PZ_Master1
python -m wbia SMK.match_single -a ctrl --profile --db GZ_ALL

Example

>>> # xdoctest: +REQUIRES(--slow)
>>> # FUTURE_ENABLE
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> import wbia
>>> qreq_ = wbia.testdata_qreq_(defaultdb='PZ_MTEST')
>>> ibs = qreq_.ibs
>>> daids = qreq_.daids
>>> #ibs, daids = wbia.testdata_aids(defaultdb='PZ_MTEST', default_set='dcfg')
>>> qreq_ = SMKRequest(ibs, daids[0:1], daids, {'agg': True,
>>>                                             'num_words': 1000,
>>>                                             'sv_on': True})
>>> qreq_.ensure_data()
>>> qaid = qreq_.qaids[0]
>>> daids = qreq_.daids
>>> daid = daids[1]
>>> verbose = True
>>> cm = qreq_.smk.match_single(qaid, daids, qreq_)
>>> ut.quit_if_noshow()
>>> ut.qtensure()
>>> cm.ishow_analysis(qreq_)
>>> ut.show_if_requested()
predict_matches(qreq_, verbose=True)[source]
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> ibs, smk, qreq_ = testdata_smk()
>>> verbose = True
rrr(verbose=True, reload_module=True)

Special class reloading function. This function is often injected as rrr of classes.

class wbia.algo.smk.smk_pipeline.SMKRequest(ibs=None, qaids=None, daids=None, config=None)[source]

Bases: wbia.algo.smk.match_chips5.EstimatorRequest

qreq_-like object. Trying to work on becoming more scikit-ish

CommandLine:

python -m wbia.algo.smk.smk_pipeline SMKRequest --profile
python -m wbia.algo.smk.smk_pipeline SMKRequest --show

python -m wbia draw_rank_cmc --db GZ_ALL --show
    -p :proot=smk,num_words=[64000,4000],nAssign=[1,5],sv_on=[False,True]
    -a ctrl:qmingt=2

python -m wbia draw_rank_cmc --db PZ_MTEST --show
    -p :proot=smk,num_words=[64000,8000,4000],nAssign=[1,2,4],sv_on=[True,False]
       default:proot=vsmany,sv_on=[True,False]
    -a default:qmingt=2

python -m wbia draw_rank_cmc --db PZ_MTEST --show
    -p :proot=smk,num_words=[64000],nAssign=[1],sv_on=[True]
       default:proot=vsmany,sv_on=[True]
    -a default:qmingt=2

python -m wbia draw_rank_cmc --db PZ_Master1 --show
    -p :proot=smk,num_words=[64000],nAssign=[1],sv_on=[False]
    -a ctrl:qmingt=2

python -m wbia draw_rank_cmc --db PZ_Master1
    -p :proot=smk,num_words=[64000],nAssign=[1],sv_on=[True]
    -a ctrl:qmingt=2,qindex=60:80 --profile

python -m wbia draw_rank_cmc --db GZ_ALL
    -p :proot=smk,num_words=[64000],nAssign=[1],sv_on=[True]
    -a ctrl:qmingt=2,qindex=40:60 --profile

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> import wbia
>>> ibs, aid_list = wbia.testdata_aids(defaultdb='PZ_MTEST')
>>> qaids = aid_list[0:2]
>>> daids = aid_list[:]
>>> config = {'nAssign': 2, 'num_words': 64000, 'sv_on': True}
>>> qreq_ = SMKRequest(ibs, qaids, daids, config)
>>> qreq_.ensure_data()
>>> cm_list = qreq_.execute()
>>> ut.quit_if_noshow()
>>> ut.qtensure()
>>> cm_list[0].ishow_analysis(qreq_, fnum=1, viz_name_score=False)
>>> cm_list[1].ishow_analysis(qreq_, fnum=2, viz_name_score=False)
>>> ut.show_if_requested()
dump_vectors()[source]

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> import wbia
>>> ibs, aid_list = wbia.testdata_aids(defaultdb='PZ_MTEST', a='default:mingt=2,pername=2')
>>> qaids = aid_list[0:2]
>>> daids = aid_list[:]
>>> config = {'nAssign': 1, 'num_words': 8000,
>>>           'sv_on': True}
>>> qreq_ = SMKRequest(ibs, qaids, daids, config)
>>> qreq_.ensure_data()
ensure_data()[source]
>>> import wbia
>>> qreq_ = wbia.testdata_qreq_(
>>>     defaultdb='Oxford', a='oxford',
>>>     p='default:proot=smk,nAssign=1,num_words=64000,SV=False,can_match_sameimg=True,dim_size=None')
execute_pipeline()[source]
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> ibs, smk, qreq_ = testdata_smk()
>>> cm_list = qreq_.execute()
get_qreq_dannot_kpts(daids)[source]
get_qreq_qannot_kpts(qaids)[source]
rrr(verbose=True, reload_module=True)

Special class reloading function. This function is often injected as rrr of classes.

class wbia.algo.smk.smk_pipeline.SMKRequestConfig(**kwargs)[source]

Bases: wbia.dtool.base.Config

Figure out how to do this

wbia.algo.smk.smk_pipeline.check_can_match(qaid, hit_daids, qreq_)[source]
wbia.algo.smk.smk_pipeline.match_kernel_agg(X, Y, wx_to_weight, alpha, thresh)[source]
wbia.algo.smk.smk_pipeline.match_kernel_sep(X, Y, wx_to_weight, alpha, thresh)[source]
wbia.algo.smk.smk_pipeline.testdata_smk(*args, **kwargs)[source]
>>> from wbia.algo.smk.smk_pipeline import *  # NOQA
>>> kwargs = {}
wbia.algo.smk.smk_pipeline.word_isect(X, Y, wx_to_weight)[source]

wbia.algo.smk.vocab_indexer module

class wbia.algo.smk.vocab_indexer.VisualVocab(words=None)[source]

Bases: utool.util_dev.NiceRepr

Class that maintains a list of visual words (cluster centers). Also maintains a nearest neighbor index structure for finding words. This class is built using the depcache.

build(verbose=True)[source]
nn_index(idx_to_vec, nAssign, checks=None)[source]
>>> idx_to_vec = depc.d.get_feat_vecs(aid_list)[0]
>>> vocab = vocab
>>> nAssign = 1
render_vocab()[source]

Renders the average patch of each word. This is a quick visualization of the entire vocabulary.

CommandLine:

python -m wbia.algo.smk.vocab_indexer render_vocab --show

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.vocab_indexer import *  # NOQA
>>> vocab = testdata_vocab('PZ_MTEST', num_words=64)
>>> all_words = vocab.render_vocab()
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.qt4ensure()
>>> pt.imshow(all_words)
>>> ut.show_if_requested()
rrr(verbose=True, reload_module=True)

Special class reloading function. This function is often injected as rrr of classes.

property shape
class wbia.algo.smk.vocab_indexer.VocabConfig(**kwargs)[source]

Bases: wbia.dtool.base.Config

wbia.algo.smk.vocab_indexer.compute_vocab(depc, fid_list, config)[source]

Depcache method for computing a new visual vocab

CommandLine:

python -m wbia.core_annots --exec-compute_neighbor_index --show
python -m wbia show_depc_annot_table_input --show --tablename=neighbor_index

python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:0
python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:1

# FIXME make util_tests register
python -m wbia.algo.smk.vocab_indexer compute_vocab:0

Ignore:
>>> # Lev Oxford Debug Example
>>> import wbia
>>> ibs = wbia.opendb('Oxford')
>>> depc = ibs.depc
>>> table = depc['vocab']
>>> # Check what currently exists in vocab table
>>> table.print_configs()
>>> table.print_table()
>>> table.print_internal_info()
>>> # Grab aids used to compute vocab
>>> from wbia.expt.experiment_helpers import get_annotcfg_list
>>> expanded_aids_list = get_annotcfg_list(ibs, ['oxford'])[1]
>>> qaids, daids = expanded_aids_list[0]
>>> vocab_aids = daids
>>> config = {'num_words': 64000}
>>> exists = depc.check_rowids('vocab', [vocab_aids], config=config)
>>> print('exists = %r' % (exists,))
>>> vocab_rowid = depc.get_rowids('vocab', [vocab_aids], config=config)[0]
>>> print('vocab_rowid = %r' % (vocab_rowid,))
>>> vocab = table.get_row_data([vocab_rowid], 'words')[0]
>>> print('vocab = %r' % (vocab,))

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.vocab_indexer import *  # NOQA
>>> # Test depcache access
>>> import wbia
>>> ibs, aid_list = wbia.testdata_aids('testdb1')
>>> depc = ibs.depc_annot
>>> input_tuple = [aid_list]
>>> rowid_kw = {}
>>> tablename = 'vocab'
>>> vocabid_list = depc.get_rowids(tablename, input_tuple, **rowid_kw)
>>> vocab = depc.get(tablename, input_tuple, 'words')[0]
>>> assert vocab.wordflann is not None
>>> assert vocab.wordflann._FLANN__curindex_data is not None
>>> assert vocab.wordflann._FLANN__curindex_data is vocab.wx_to_word

Example

>>> # DISABLE_DOCTEST
>>> from wbia.algo.smk.vocab_indexer import *  # NOQA
>>> import wbia
>>> ibs, aid_list = wbia.testdata_aids('testdb1')
>>> depc = ibs.depc_annot
>>> fid_list = depc.get_rowids('feat', aid_list)
>>> config = VocabConfig()
>>> vocab, train_vecs = ut.exec_func_src(compute_vocab, keys=['vocab', 'train_vecs'])
>>> idx_to_vec = depc.d.get_feat_vecs(aid_list)[0]
>>> self = vocab
>>> ut.quit_if_noshow()
>>> data = train_vecs
>>> centroids = vocab.wx_to_word
>>> import wbia.plottool as pt
>>> vt.plot_centroids(data, centroids, num_pca_dims=2)
>>> ut.show_if_requested()
>>> #config = ibs.depc_annot['vocab'].configclass()
wbia.algo.smk.vocab_indexer.testdata_vocab(defaultdb='testdb1', **kwargs)[source]
>>> from wbia.algo.smk.vocab_indexer import *  # NOQA
>>> defaultdb='testdb1'
>>> kwargs = {'num_words': 1000}

Module contents

wbia.algo.smk.IMPORT_TUPLES = [('match_chips5', None), ('smk_pipeline', None), ('vocab_indexer', None)]

Regen Command:

cd /home/joncrall/code/wbia/wbia/algo/smk
makeinit.py --modname=wbia.algo.smk

wbia.algo.smk.reassign_submodule_attributes(verbose=True)[source]

Why reloading all the modules doesn't do this, I don't know.

wbia.algo.smk.reload_subs(verbose=True)[source]

Reloads wbia.algo.smk and submodules

wbia.algo.smk.rrrr(verbose=True)

Reloads wbia.algo.smk and submodules