from textblob import TextBlob
gravity = TextBlob('A screaming comes across the sky.')
gravity.words
WordList(['A', 'screaming', 'comes', 'across', 'the', 'sky'])
gravity.tags
[('A', 'DT'), ('screaming', 'NN'), ('comes', 'VBZ'), ('across', 'IN'), ('the', 'DT'), ('sky', 'NN')]
testtext = TextBlob('"Etc." is used to indicate that only some of the items from a list have been used. Generally, in American English, if "etc." is used in the middle of a sentence, it is followed by a comma. (Tennis, soccer, baseball, etc., are outdoor games.) ... (Being outdoors, we played tennis, soccer, baseball, etc.)')
testtext.words
WordList(['Etc', 'is', 'used', 'to', 'indicate', 'that', 'only', 'some', 'of', 'the', 'items', 'from', 'a', 'list', 'have', 'been', 'used', 'Generally', 'in', 'American', 'English', 'if', 'etc', 'is', 'used', 'in', 'the', 'middle', 'of', 'a', 'sentence', 'it', 'is', 'followed', 'by', 'a', 'comma', 'Tennis', 'soccer', 'baseball', 'etc', 'are', 'outdoor', 'games', 'Being', 'outdoors', 'we', 'played', 'tennis', 'soccer', 'baseball', 'etc'])
sorted(testtext.words)
['American', 'Being', 'English', 'Etc', 'Generally', 'Tennis', 'a', 'a', 'a', 'are', 'baseball', 'baseball', 'been', 'by', 'comma', 'etc', 'etc', 'etc', 'followed', 'from', 'games', 'have', 'if', 'in', 'in', 'indicate', 'is', 'is', 'is', 'it', 'items', 'list', 'middle', 'of', 'of', 'only', 'outdoor', 'outdoors', 'played', 'sentence', 'soccer', 'soccer', 'some', 'tennis', 'that', 'the', 'the', 'to', 'used', 'used', 'used', 'we']
len(testtext.words)
52
testtext.words[:3]
WordList(['Etc', 'is', 'used'])
testtext.tokens
WordList(['``', 'Etc', '.', "''", 'is', 'used', 'to', 'indicate', 'that', 'only', 'some', 'of', 'the', 'items', 'from', 'a', 'list', 'have', 'been', 'used', '.', 'Generally', ',', 'in', 'American', 'English', ',', 'if', '``', 'etc', '.', "''", 'is', 'used', 'in', 'the', 'middle', 'of', 'a', 'sentence', ',', 'it', 'is', 'followed', 'by', 'a', 'comma', '.', '(', 'Tennis', ',', 'soccer', ',', 'baseball', ',', 'etc.', ',', 'are', 'outdoor', 'games', '.', ')', '...', '(', 'Being', 'outdoors', ',', 'we', 'played', 'tennis', ',', 'soccer', ',', 'baseball', ',', 'etc', '.', ')'])
TextBlob("Can't touch this.").tokens
WordList(['Ca', "n't", 'touch', 'this', '.'])
TextBlob("Can't touch this.").words
WordList(['Ca', "n't", 'touch', 'this'])
TextBlob('Cannot touch this.').words
WordList(['Can', 'not', 'touch', 'this'])
TextBlob('Cannot touch this.').tokens
WordList(['Can', 'not', 'touch', 'this', '.'])
pride = TextBlob('It is a truth universally acknowledged, that a single female in possession of a good fortune, must be in want of a puppy.')
pride.tags
[('It', 'PRP'), ('is', 'VBZ'), ('a', 'DT'), ('truth', 'NN'), ('universally', 'RB'), ('acknowledged', 'VBD'), ('that', 'IN'), ('a', 'DT'), ('single', 'JJ'), ('female', 'NN'), ('in', 'IN'), ('possession', 'NN'), ('of', 'IN'), ('a', 'DT'), ('good', 'JJ'), ('fortune', 'NN'), ('must', 'MD'), ('be', 'VB'), ('in', 'IN'), ('want', 'NN'), ('of', 'IN'), ('a', 'DT'), ('puppy', 'JJ')]
type(pride.tags)
list
It's funny that 'puppy' is categorized as JJ (adjective).
count = 0
for (word, tag) in pride.tags:
if tag == 'JJ':
count = count + 1
print(count)
3
def jjcounter(value):
    """Count how many words are tagged 'JJ' (adjective).

    Accepts either a TextBlob-like object (its ``.tags`` are used) or a
    list of ``(word, tag)`` pairs, so both ``jjcounter(pride)`` and
    ``jjcounter(pride.tags)`` work.
    """
    # Bug fix: the original iterated the global `pride.tags`, ignoring
    # the `value` argument entirely, so it only ever counted `pride`.
    tags = value.tags if hasattr(value, 'tags') else value
    count = 0
    for (word, tag) in tags:
        if tag == 'JJ':
            count = count + 1
    return count
def nncounter(value):
    """Count how many words are tagged 'NN' (singular noun).

    Accepts either a TextBlob-like object (its ``.tags`` are used) or a
    list of ``(word, tag)`` pairs, so both ``nncounter(pride)`` and
    ``nncounter(pride.tags)`` work.
    """
    # Bug fix: the original iterated the global `pride.tags`, ignoring
    # the `value` argument entirely, so it only ever counted `pride`.
    tags = value.tags if hasattr(value, 'tags') else value
    count = 0
    for (word, tag) in tags:
        if tag == 'NN':
            count = count + 1
    return count
jjcounter(pride.tags), nncounter(pride.tags)
(3, 5)
# Duplicate of the jjcounter defined above.
# NOTE(review): it iterates the global `pride.tags` and never uses the
# `value` parameter, so it only counts adjectives in `pride`.
def jjcounter(value):
count = 0
for (word, tag) in pride.tags:
if tag == 'JJ':
count = count + 1
return(count)
# Duplicate of the nncounter defined above.
# NOTE(review): it iterates the global `pride.tags` and never uses the
# `value` parameter, so it only counts nouns in `pride`.
def nncounter(value):
count = 0
for (word, tag) in pride.tags:
if tag == 'NN':
count = count + 1
return(count)
testtext.tags
[('Etc', 'NN'), ('is', 'VBZ'), ('used', 'VBN'), ('to', 'TO'), ('indicate', 'VB'), ('that', 'IN'), ('only', 'RB'), ('some', 'DT'), ('of', 'IN'), ('the', 'DT'), ('items', 'NNS'), ('from', 'IN'), ('a', 'DT'), ('list', 'NN'), ('have', 'VBP'), ('been', 'VBN'), ('used', 'VBN'), ('Generally', 'RB'), ('in', 'IN'), ('American', 'JJ'), ('English', 'NNP'), ('if', 'IN'), ('etc', 'FW'), ('is', 'VBZ'), ('used', 'VBN'), ('in', 'IN'), ('the', 'DT'), ('middle', 'NN'), ('of', 'IN'), ('a', 'DT'), ('sentence', 'NN'), ('it', 'PRP'), ('is', 'VBZ'), ('followed', 'VBN'), ('by', 'IN'), ('a', 'DT'), ('comma', 'NN'), ('Tennis', 'NNP'), ('soccer', 'NN'), ('baseball', 'NN'), ('etc.', 'FW'), ('are', 'VBP'), ('outdoor', 'JJ'), ('games', 'NNS'), ('Being', 'VBG'), ('outdoors', 'NNS'), ('we', 'PRP'), ('played', 'VBD'), ('tennis', 'NN'), ('soccer', 'NN'), ('baseball', 'NN'), ('etc', 'FW')]
def s2t_tag_org(text):
    """Return the (word, POS-tag) pairs of the given blob."""
    tagged_words = text.tags
    return tagged_words
s2t_tag_org(gravity)
[('A', 'DT'), ('screaming', 'NN'), ('comes', 'VBZ'), ('across', 'IN'), ('the', 'DT'), ('sky', 'NN')]
def jjcounter(value):
    """Return the number of words in `value` tagged 'JJ' (adjective)."""
    return sum(1 for _word, pos in value.tags if pos == 'JJ')
def nncounter(value):
    """Return the number of words in `value` tagged 'NN' (singular noun)."""
    return sum(1 for _word, pos in value.tags if pos == 'NN')
I first tried this:
# First attempt, quoted here for the notes: it did not work for other
# texts because it loops over the global `pride.tags` — the `value`
# argument is never used.
def jjcounter(value):
count = 0
for (word, tag) in pride.tags:
if tag == 'JJ':
count = count + 1
return(count)
**but** it did not work. So I wrote this function **s2t_tag_org()**; it gives back words with their tags
# Helper quoted in the notes: returns the blob's (word, tag) pairs.
def s2t_tag_org(text):
return text.tags
Still did not work. So I changed **s2t_tag_org** into **value.tags**
# Working version: iterates `value.tags`, so any TextBlob passed in is
# counted (not just `pride`).
def jjcounter(value):
count = 0
for (word, tag) in value.tags:
if tag == 'JJ':
count = count + 1
return(count)
# Working version: iterates `value.tags`, so any TextBlob passed in is
# counted (not just `pride`).
def nncounter(value):
count = 0
for (word, tag) in value.tags:
if tag == 'NN':
count = count + 1
return(count)
Then it worked! :)
jjcounter(gravity)
0
nncounter(testtext)
10
testtext.sentences
[Sentence(""Etc.""), Sentence("is used to indicate that only some of the items from a list have been used."), Sentence("Generally, in American English, if "etc.""), Sentence("is used in the middle of a sentence, it is followed by a comma."), Sentence("(Tennis, soccer, baseball, etc., are outdoor games.)"), Sentence("... (Being outdoors, we played tennis, soccer, baseball, etc.)")]
len(testtext.sentences)
6
source = open('1342-0.txt.txt')
pride = source.read()
source.close()
jjcounter(testtext) / len(testtext.words)
0.038461538461538464
nncounter(testtext) / len(testtext.words)
0.19230769230769232
jjcounter(pride) / len(pride.words)
0.13043478260869565
nncounter(pride) / len(pride.words)
0.21739130434782608
If every novel had about the same percentage of adjectives, it almost certainly wouldn't be interesting to track and compare them. But future work along these lines, embracing a larger corpus of novels, has the potential to show interesting patterns, whether or not there is anything to be gained by comparing these two particular novels in this way.
"potential to show interesting patterns" is interesting. Because "patterns" implies multiples, and it's a mystery.
Inflected word list
An inflected form of a word has a changed spelling or ending
that shows the way it is used in sentences:
"Finds" and "found" are inflected forms of "find".
source = open('English_word_list.txt')
words = source.read().split()
source.close()
words[0:100]
['awaw', 'ability', 'able', 'about', 'above', 'accept', 'according', 'account', 'across', 'act', 'action', 'activity', 'actually', 'add', 'address', 'administration', 'admit', 'adult', 'affect', 'after', 'again', 'against', 'age', 'agency', 'agent', 'ago', 'agree', 'agreement', 'ahead', 'air', 'all', 'allow', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'American', 'among', 'amount', 'analysis', 'and', 'animal', 'another', 'answer', 'any', 'anyone', 'anything', 'appear', 'apply', 'approach', 'area', 'argue', 'arm', 'around', 'arrive', 'art', 'article', 'artist', 'as', 'ask', 'assume', 'at', 'attack', 'attention', 'attorney', 'audience', 'author', 'authority', 'available', 'avoid', 'away', 'baby', 'back', 'bad', 'bag', 'ball', 'bank', 'bar', 'base', 'be', 'beat', 'beautiful', 'because', 'become', 'bed', 'before', 'begin', 'behavior', 'behind', 'believe', 'benefit', 'best', 'better', 'between', 'beyond', 'big', 'bill']
for in : # Iteration through the word list
if ______: #conditional testing to see if we have a reduplication
print(____) #Only if we do, print the word
# Print every word that is exactly seven characters long.
for x in words:
    # Bug fix: `if len(x) is == 7:` was a SyntaxError — `is` and `==`
    # are two different operators; equality alone is wanted here.
    if len(x) == 7:
        print(x)
File "/tmp/ipykernel_25743/1195590102.py", line 2 if len(x) is == 7: ^ SyntaxError: invalid syntax
len('string')
6
print(words[:len(words//2)])
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) /tmp/ipykernel_25743/3081815845.py in <module> ----> 1 print(words[:len(words//2)]) TypeError: unsupported operand type(s) for //: 'list' and 'int'
the expression I used is w[:len(w)//2] * 2 == w. I called the variable that was holding the individual words w; you may have called it something else, such as word. Then I took a slice of w from the beginning up to the halfway point, which as a floating-point number is len(w)/2.
However, we need an integer to be used in slicing this string, so we use integer division len(w)//2 (you need two slashes). Then I multiplied this string by 2. This will do string multiplication, like cowboy => cowcow but tutu => tutu. What remains is to check to see if this is the same as the word itself.
Q. I understand why he wrote this line but what I do not understand the way is written. w[:len(w)] why ':'?
# Print every "reduplication": a word whose first half, doubled, equals
# the word itself (e.g. 'tutu', 'awaw'). Odd-length words can never
# match, so no explicit length check is needed.
for candidate in words:
    half = candidate[:len(candidate) // 2]
    if half * 2 == candidate:
        print(candidate)
#hmm it's not working, I put 'awaw' into my list but it does not print 'awaw'
awaw
oh, when I rerun the source = open('English_word_list.txt') words = source.read().split() source.close()
now it prints 'awaw'
one thing that may seem odd is that in the program structure, I did not check to see whether the word has an odd or even number of characters. A word with an odd-length word cannot be a reduplication as we have defined it. If the conditional is constructed properly, it's not necessary. Finding a simpler solution is best. => this reminds me of Kimberly's point about rejection. That maybe rejection is about honesty and simplification, and clarification.
from textblob import Word
bank_word = Word('bank')
bank_word.synsets
#what you see here isn't a lost of synonyms,
#but a list of synsets, sets of synonyms
#that each pertain to a different sense.
[Synset('bank.n.01'), Synset('depository_financial_institution.n.01'), Synset('bank.n.03'), Synset('bank.n.04'), Synset('bank.n.05'), Synset('bank.n.06'), Synset('bank.n.07'), Synset('savings_bank.n.02'), Synset('bank.n.09'), Synset('bank.n.10'), Synset('bank.v.01'), Synset('bank.v.02'), Synset('bank.v.03'), Synset('bank.v.04'), Synset('bank.v.05'), Synset('deposit.v.02'), Synset('bank.v.07'), Synset('trust.v.01')]
bank_word.definitions
#bank_words.definitions => this one has name error, name 'bank_words' is not defined.
['sloping land (especially the slope beside a body of water)', 'a financial institution that accepts deposits and channels the money into lending activities', 'a long ridge or pile', 'an arrangement of similar objects in a row or in tiers', 'a supply or stock held in reserve for future use (especially in emergencies)', 'the funds held by a gambling house or the dealer in some gambling games', 'a slope in the turn of a road or track; the outside is higher than the inside in order to reduce the effects of centrifugal force', 'a container (usually with a slot in the top) for keeping money at home', 'a building in which the business of banking transacted', 'a flight maneuver; aircraft tips laterally about its longitudinal axis (especially in turning)', 'tip laterally', 'enclose with a bank', 'do business with a bank or keep an account at a bank', 'act as the banker in a game or in gambling', 'be in the banking business', 'put into a bank account', 'cover with ashes so to control the rate of burning', 'have confidence or faith in']
synset means different group, for example, the word bank can mean 은행, 강둑 so it is under different sub-group; synset
"When you select a particular sense, u r also choosing a synset, a set of particular words or phrases(called lemma names) that for most all practical purposes mean the same thing and relate to the specific sense."
bank1 = bank_word.synsets[0]
bank1.lemma_names()
['bank']
bank_word.synsets[1].lemma_names()
['depository_financial_institution', 'bank', 'banking_concern', 'banking_company']
bank_word.synsets[2].lemma_names()
['bank']
bank_word.synsets[3].lemma_names()
['bank']
bank_word.synsets[4].lemma_names()
['bank']
bank_word.synsets[7].lemma_names()
['savings_bank', 'coin_bank', 'money_box', 'bank']
So if you want to know if two synsets are the same, you should check to see if the two Synset objects are equal, not if they have the same lemma names as returned by their lemma_name() methods.
Because if you use lemma_name()methods, there are a lot of overlapping, same name 'bank'
bank3 = bank_word.synsets[2]
bank3.lemma_names()
bank1.lemma_names() == bank3.lemma_names()
True
bank1 == bank3
False
As you can see here, the definition, the content of bank1 and bank3 are not the same, but it is under the same lemma_names which may lead to confusion..?
from textblob.wordnet import NOUN
# what does this textblob '.' wordnet indicates? is it like directory..?
bank_word.get_synsets(NOUN)
[Synset('bank.n.01'), Synset('depository_financial_institution.n.01'), Synset('bank.n.03'), Synset('bank.n.04'), Synset('bank.n.05'), Synset('bank.n.06'), Synset('bank.n.07'), Synset('savings_bank.n.02'), Synset('bank.n.09'), Synset('bank.n.10')]
from textblob.wordnet import VERB
bank_word.get_synsets(VERB)
[Synset('bank.v.01'), Synset('bank.v.02'), Synset('bank.v.03'), Synset('bank.v.04'), Synset('bank.v.05'), Synset('deposit.v.02'), Synset('bank.v.07'), Synset('trust.v.01')]
Let's take a look at generality and specificity. "Sedan" is one particular, more specific term for a car, while "motor vehicle" is a more general term.
car_word = Word('car')
car_word.get_synsets(NOUN)
[Synset('car.n.01'), Synset('car.n.02'), Synset('car.n.03'), Synset('car.n.04'), Synset('cable_car.n.01')]
car_word.definitions
['a motor vehicle with four wheels; usually propelled by an internal combustion engine', 'a wheeled vehicle adapted to the rails of railroad', 'the compartment that is suspended from an airship and that carries personnel and the cargo and the power plant', 'where passengers ride up and down', 'a conveyance for passengers or freight on a cable railway']
Synsets are ordered by how common each sense is, so we can expect that the automotive sense will be early in the list. It is indeed, first!
car1 = car_word.get_synsets(NOUN)[0]
car1.definition()
'a motor vehicle with four wheels; usually propelled by an internal combustion engine'
Q What's the difference between definitions and definition?? In line 127 I wrote car_word.definitions and in line 132 only car1.definition (not definitions) works.
car1.hypernyms()
[Synset('motor_vehicle.n.01')]
car1.hypernyms()[0]
Synset('motor_vehicle.n.01')
car1.hypernyms()[0].lemma_names()
['motor_vehicle', 'automotive_vehicle']
Hypernym means "a word whose meaning includes a group of other words:" => generality 더 큰 그룹을 의미 ex) The first hypernyms for dog that come to mind would be animal or pet.
and
Hyponym means "a word whose meaning is included in the meaning of another word:" => specificity 더 작은 그룹, 예시를 의미 ex) "Horse" is a hyponym of "animal".
car1.hyponyms()
[Synset('ambulance.n.01'), Synset('beach_wagon.n.01'), Synset('bus.n.04'), Synset('cab.n.03'), Synset('compact.n.03'), Synset('convertible.n.01'), Synset('coupe.n.01'), Synset('cruiser.n.01'), Synset('electric.n.01'), Synset('gas_guzzler.n.01'), Synset('hardtop.n.01'), Synset('hatchback.n.01'), Synset('horseless_carriage.n.01'), Synset('hot_rod.n.01'), Synset('jeep.n.01'), Synset('limousine.n.01'), Synset('loaner.n.02'), Synset('minicar.n.01'), Synset('minivan.n.01'), Synset('model_t.n.01'), Synset('pace_car.n.01'), Synset('racer.n.02'), Synset('roadster.n.01'), Synset('sedan.n.01'), Synset('sport_utility.n.01'), Synset('sports_car.n.01'), Synset('stanley_steamer.n.01'), Synset('stock_car.n.01'), Synset('subcompact.n.01'), Synset('touring_car.n.01'), Synset('used-car.n.01')]
One of the interesting things that WordNet can assist with is changing a text to be more general or more specific.
(I think by using .hypernyms() and .hyponyms())
river = Word('river').get_synsets(NOUN)[0]
바로 위의 코드는 가장 일반적인 river의 뜻을 가져오고 그것을 river라는 변수에 할당하기 위한 작업을 의미하는 듯. 하지만 어떤 때에는 첫번째에 오는 것이 가장 일반적인 의미가 아닐수도 있으니 확인하는 작업이 필요. 그래서 다음의 코드들은 그것을 확인하는 작업임.
river.definition()
'a large natural stream of water (larger than a creek)'
enigma = Word('enigma').get_synsets(NOUN)[0]
enigma.definition()
'something that baffles understanding and cannot be explained'
river.path_similarity(enigma)
0.06666666666666667
river.path_similarity(bank1)
0.1111111111111111
bank1.definition()
'sloping land (especially the slope beside a body of water)'
print(river.definition(), bank1.definition(), enigma.definition())
a large natural stream of water (larger than a creek) sloping land (especially the slope beside a body of water) something that baffles understanding and cannot be explained
Q how can we print with linebreaks again..?
With some programming background, you can learn to cut up a text in new, surprising and compelling ways.
things = ['counter', 'original', 'spare', 'strange', 'fickle', 'freckled', 'swift', 'slow', 'sweet', 'sour', 'adazzle', 'dim']
def pied(words):
    """Fill the 'Pied Beauty' template with twelve supplied words.

    The original attempt failed with "SyntaxError: EOL while scanning
    string literal" because the final string was never closed; this
    version builds the verse from complete, terminated literals and
    returns it (so ``print(pied(things))`` works).
    """
    verse = (
        'All things ' + words[0] + ', ' + words[1] + ', ' + words[2] + ', ' + words[3] + ';\n'
        'Whatever is ' + words[4] + ', ' + words[5] + ' (who knows how?)\n'
        'with ' + words[6] + ', ' + words[7] + '; ' + words[8] + ', ' + words[9] + '; '
        + words[10] + ', ' + words[11] + ';\n'
        'He fathers-forth whose beauty is past change:\n'
        'Praise him.'
    )
    return verse
print(pied(things))
File "/tmp/ipykernel_25743/4071698526.py", line 3 'All things' + words[0] + ',' + words[1] + ',' + words [2] + ',' + words[3] + ';' + '\n' + 'Whatever is' + words[4] + ',' + words[5] + '(who knows how?)\n' + 'with' + words[6] + ',' + words[7] + ';' + words[8] + ',' + words[9] + ';' + words[10] + ',' + words[11] + ';\n' + 'He fathers-forth whose neauty is past change:\n' + ' ^ SyntaxError: EOL while scanning string literal
Why this not work? come back again and try it again.
# NOTE(review): re.sub() with no count replaces EVERY '_' on the first
# pass through the loop, so the whole verse is filled with words[0]
# (here 'counter'). This buggy version is kept to illustrate the
# problem; passing count=1 to sub() fills one slot per word instead.
def beauty(words):
from re import sub
verse = """All things _,_,_,_;
Whatever is _, _ (who knows how?)
with _, _; _, _; _, _;
He fathers-forth whose beauty is past change:
Praise him. """
for word in words:
verse = sub('_', word, verse)
return verse
print(beauty(things))
All things counter,counter,counter,counter; Whatever is counter, counter (who knows how?) with counter, counter; counter, counter; counter, counter; He fathers-forth whose beauty is past change: Praise him.
The issue here is that we want only one substitution to happen each time we go through the loop, so that one _ gets replaced each time. However, our substitution method is too eager. There is an easy fix for this. The sub() function accepts an optional fourth argument, count, that limits how many substitutions will be made. For instance, if sub('sky', 'sea', text, 3) were used, then sky would be replaced by sea at most three times in the string text.
def beauty(words):
from re import sub
verse = """All things _, _, _, _;
Whatever is _, _ (who knows how?)
with _, _; _, _; _, _;
He fathers-forth whose beauty is past change:
Praise him. """
for word in words:
verse = sub('_', word, verse, 1)
return verse
print(beauty(things))
All things counter, original, spare, strange; Whatever is fickle, freckled (who knows how?) with swift, slow; sweet, sour; adazzle, dim; He fathers-forth whose beauty is past change: Praise him.
from random import shuffle
shuffle(things)
print(beauty(things))
shuffle(things)
print(beauty(things))
All things dim, spare, swift, sour; Whatever is fickle, slow (who knows how?) with adazzle, sweet; freckled, strange; counter, original; He fathers-forth whose beauty is past change: Praise him. All things swift, original, dim, strange; Whatever is sweet, sour (who knows how?) with fickle, freckled; spare, counter; slow, adazzle; He fathers-forth whose beauty is past change: Praise him.
from random import choice
def sentence():
    """Randomly return either 'right' or 'wrong'."""
    options = ['right', 'wrong']
    return choice(options)
sentence()
'right'
sentence()
'wrong'
sentence()
'right'
sentence()
'right'
def sentence():
    """Return 'right' or one of the 'wrong' variants.

    Note: wrong() is evaluated while building the option list, so a
    random draw for the second option happens even when 'right' ends
    up being selected — same as the original's eager argument.
    """
    alternative = wrong()
    return choice(['right', alternative])

def wrong():
    """Return a randomly chosen spelling/intensity of 'wrong'."""
    variants = ['rong', 'way wrong', 'wrong']
    return choice(variants)
sentence()
'wrong'
S -> 'right' | Wrong
Wrong -> 'rong'|'way wrong'|'wrong'
File "/tmp/ipykernel_25743/2086087841.py", line 1 S -> 'right' | Wrong ^ SyntaxError: invalid syntax
Q why line 189 is not working? and what can I do about it?
terminal we are done with production
nonterminal to continue with the generation of your string we will look through the other rules is the one that applies. that rule says that Wrong produces one of these three strings; 'rong', 'way wrong', 'wrong'
S -> Sentence
Sentence -> "we" Aux Act Last
Aux -> "can" | "don't" | "must" | "will" | "won't"
Act -> "find" | "know" | "learn" | "read" | "say"| "see"
Last -> "it" | Sentence
File "/tmp/ipykernel_25743/2228282657.py", line 1 S -> Sentence ^ SyntaxError: invalid syntax
Sentence: nonterminal / we:terminal / 'we' is followed by three nonterminals: Aux, Act, Last / But Last is different: this rule could result in the terminal "it" - but it could also continue generation by moving along to the Sentence rule. That does something that you may find interesting: It produces another embedded sentence.
recursive grammar
def sentence():
    """Generate a string from the grammar S -> "we" Aux Act Last.

    Words are joined with spaces so the output is readable
    (the original concatenated them with no separators, though it
    never ran far enough to show that — see last() below).
    """
    return "we " + aux() + " " + act() + " " + last()

def aux():
    """Aux -> one of five auxiliary verbs."""
    return choice(["can", "don't", "must", "will", "won't"])

def act():
    """Act -> one of six action verbs."""
    return choice(["find", "know", "learn", "read", "say", "see"])

def last():
    """Last -> "it" | Sentence.

    Bug fix: the original wrote choice(["it", sentence()]), which calls
    sentence() eagerly while BUILDING the list — on every invocation —
    so the recursion could never stop and always raised RecursionError.
    Choosing first and recursing only when that branch is taken
    terminates with probability 1.
    """
    if choice([True, False]):
        return "it"
    return sentence()
I don't understand this part: def last(): phrase = choice(["it", sentence()]) if not phrase: phrase = sentence() return phrase
Q why not if phrase: phrase = sentence()?
print(sentence())
--------------------------------------------------------------------------- RecursionError Traceback (most recent call last) /tmp/ipykernel_25743/4223016270.py in <module> ----> 1 print(sentence()) /tmp/ipykernel_25743/2052733086.py in sentence() 1 def sentence(): ----> 2 return "we" + aux() + act() + last() 3 def aux(): 4 return choice(["can", "don't", "must", "will", "won't"]) 5 def act(): /tmp/ipykernel_25743/2052733086.py in last() 6 return choice(["find", "know", "learn", "read", "say", "see"]) 7 def last(): ----> 8 phrase = choice(["it", sentence()]) 9 if not phrase: 10 phrase = sentence() ... last 2 frames repeated, from the frame below ... /tmp/ipykernel_25743/2052733086.py in sentence() 1 def sentence(): ----> 2 return "we" + aux() + act() + last() 3 def aux(): 4 return choice(["can", "don't", "must", "will", "won't"]) 5 def act(): RecursionError: maximum recursion depth exceeded while calling a Python object
grammar_string = """
File "/tmp/ipykernel_25743/569317821.py", line 1 grammar_string = """ ^ SyntaxError: EOF while scanning triple-quoted string literal