How can I optimize an SQL query for calculating word frequency?

I am trying to populate two tables:

token:

 word  | df(the number of documents containing a word) 
==========
"dog"  | 5    
"cat"  | 2    
"horse"| 1    

token_count:

tokenid | docid| tf(the number of times a word occurs in a document)
====================
   1    |  1   | 6
   2    |  2   | 2
   3    |  2   | 1

using the data from documents:

id   |  title  |     body
=============================
1    |  "dog"  | "about dogs" 
2    |  "cats" | "about cats"

To do that I use ts_stat( 'select to_tsvector(''english'', body) from documents' ), which returns a table with, for each word, the number of documents containing it (ndoc) and the number of times it appears across the entire column (nentry). The second column is exactly what I need for the token table, but the third column counts occurrences over the whole column rather than per document, which is what I need for token_count.

word | ndoc | nentry
====================
dog  | 5    | 6
cat  | 2    | 2
horse| 1    | 1

This code populates the token table, and it takes about 3 seconds for a hundred documents.

INSERT INTO token (word, document_frequency)
SELECT
    word,
    ndoc
FROM 
    ts_stat( 'select to_tsvector(''english'', body) from documents' );

I tried running the following code on a smaller dataset of 15 documents and it worked, but when I run it on the current dataset (100 documents) it never stops running.

WITH temp_data AS (
    SELECT id , 
           (ts_stat('select to_tsvector(''english'', body) from documents where id='||id)).*
    FROM documents 
)
INSERT INTO token_count (docid, tokenid, tf)
SELECT
    id,
    (SELECT id FROM token WHERE word = temp_data.word LIMIT 1),
    nentry
FROM temp_data;

How can I optimize this query?

EXPLAIN ANALYZE for the dataset of 15 documents:

"Insert on token_count  (cost=1023803.22..1938766428.23 rows=9100000 width=28) (actual time=59875.204..59875.206 rows=0 loops=1)"
"  CTE temp_data"
"    ->  Result  (cost=0.00..1023803.22 rows=9100000 width=44) (actual time=0.144..853.320 rows=42449 loops=1)"
"          ->  ProjectSet  (cost=0.00..45553.23 rows=9100000 width=36) (actual time=0.142..809.366 rows=42449 loops=1)"
"                ->  Seq Scan on wikitable  (cost=0.00..19.10 rows=910 width=4) (actual time=0.010..0.029 rows=16 loops=1)"
"  ->  CTE Scan on temp_data  (cost=0.00..1937742625.00 rows=9100000 width=28) (actual time=0.509..59652.279 rows=42449 loops=1)"
"        SubPlan 2"
"          ->  Limit  (cost=0.00..212.92 rows=1 width=4) (actual time=1.381..1.381 rows=1 loops=42449)"
"                ->  Seq Scan on token  (cost=0.00..425.84 rows=2 width=4) (actual time=1.372..1.372 rows=1 loops=42449)"
"                      Filter: ((word)::text = temp_data.word)"
"                      Rows Removed by Filter: 10384"
"Planning Time: 0.202 ms"
"Execution Time: 59876.350 ms"

EXPLAIN ANALYZE for the dataset of 30 documents:

"Insert on token_count  (cost=1023803.22..6625550803.23 rows=9100000 width=28) (actual time=189910.438..189910.439 rows=0 loops=1)"
"  CTE temp_data"
"    ->  Result  (cost=0.00..1023803.22 rows=9100000 width=44) (actual time=0.191..2018.758 rows=92168 loops=1)"
"          ->  ProjectSet  (cost=0.00..45553.23 rows=9100000 width=36) (actual time=0.189..1919.726 rows=92168 loops=1)"
"                ->  Seq Scan on wikitable  (cost=0.00..19.10 rows=910 width=4) (actual time=0.013..0.053 rows=31 loops=1)"
"  ->  CTE Scan on temp_data  (cost=0.00..6624527000.00 rows=9100000 width=28) (actual time=1.009..189412.022 rows=92168 loops=1)"
"        SubPlan 2"
"          ->  Limit  (cost=0.00..727.95 rows=1 width=4) (actual time=2.029..2.029 rows=1 loops=92168)"
"                ->  Seq Scan on token  (cost=0.00..727.95 rows=1 width=4) (actual time=2.020..2.020 rows=1 loops=92168)"
"                      Filter: ((word)::text = temp_data.word)"
"                      Rows Removed by Filter: 16463"
"Planning Time: 0.234 ms"
"Execution Time: 189913.688 ms"

Answer:

Here's a demo that doesn't use ts_stat to get the word counts.

Instead it uses a lateral join against the unnested tsvector of each document.

create table documents (
 document_id serial primary key, 
 title varchar(30) not null, 
 body text not null
);

insert into documents (title, body) values
  ('dogs', 'the dog barked at the cat, but the cat ignored her.')
, ('cats', 'cats kill more birds than dogs kill cats');

create table tokens (
 token_id serial primary key, 
 word varchar(30),
 df int
);

insert into tokens (word, df)
SELECT word, ndoc
FROM ts_stat('select to_tsvector(''english'', body) from documents');
select * from tokens order by df desc;
token_id | word  | df
-------: | :---- | -:
       3 | dog   |  2
       4 | cat   |  2
       1 | kill  |  1
       2 | ignor |  1
       5 | bird  |  1
       6 | bark  |  1
create table token_counts (
 document_id int, 
 token_id int,
 tf int, 
 primary key (document_id, token_id), 
 foreign key (document_id) references documents(document_id), 
 foreign key (token_id) references tokens(token_id)
);
INSERT INTO token_counts (
 document_id, 
 token_id, 
 tf
)
select 
 doc.document_id, 
 tok.token_id, 
 lex.total
from documents as doc
cross join lateral (
  select lexeme, cardinality(positions) as total
  from unnest(to_tsvector('english', doc.body)) as tsvector
) as lex
inner join tokens as tok
  on tok.word = lex.lexeme;
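
For reference, unnest() on a tsvector returns one row per lexeme together with its positions and weights arrays, so cardinality(positions) is the number of times that lexeme occurs in the document, i.e. the per-document term frequency that the lateral subquery feeds into token_counts. A quick standalone check against the first demo document's body:

select lexeme,
       positions,                    -- word positions of this lexeme in the document
       cardinality(positions) as tf  -- per-document term frequency
from unnest(to_tsvector('english', 'the dog barked at the cat, but the cat ignored her.'));
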
select title, word, tf
from token_counts cnt
join documents doc using(document_id)
join tokens tok using(token_id)
order by document_id, token_id;
title | word  | tf
:---- | :---- | -:
dogs  | ignor |  1
dogs  | dog   |  1
dogs  | cat   |  2
dogs  | bark  |  1
cats  | kill  |  2
cats  | dog   |  1
cats  | cat   |  2
cats  | bird  |  1
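
Because each document's tsvector is unnested only once and the match against tokens is an ordinary join (which the planner can turn into a hash join), this approach should avoid the per-row correlated subquery that dominates the EXPLAIN ANALYZE output in the question.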

Demo on db<>fiddle here
