SEO helper
This commit is contained in:
320
seo_helpers.py
Normal file
320
seo_helpers.py
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
# seo_helpers.py
|
||||||
|
import html
|
||||||
|
import json
|
||||||
|
import re # Добавляем импорт re
|
||||||
|
from typing import Dict, List, Any, Optional
|
||||||
|
|
||||||
|
|
||||||
|
def generate_resume_seo_tags(resume_data: Dict[str, Any], resume_id: int) -> Dict[str, str]:
    """Generate SEO tags for a resume page.

    Args:
        resume_data: resume row from the database (dict-like), optionally
            carrying "tags", "work_experience" and "education" lists.
        resume_id: resume primary key, used to build canonical URLs.

    Returns:
        Dict with SEO tag values: title, description, keywords, Open Graph,
        Twitter, profile name parts and JSON-LD structured data.
    """
    # Escape user-provided values so they are safe inside HTML attributes.
    full_name = html.escape(resume_data.get("full_name", "") or "")
    name_parts = full_name.split(' ')
    first_name = name_parts[0] if name_parts else ''
    last_name = ' '.join(name_parts[1:]) if len(name_parts) > 1 else ''
    position = html.escape(resume_data.get("desired_position", "Специалист") or "Специалист")
    salary = html.escape(resume_data.get("desired_salary", "Зарплата не указана") or "Зарплата не указана")
    about = html.escape(
        resume_data.get("about_me", "Профессиональный опыт и навыки") or "Профессиональный опыт и навыки")

    # Guard list fields with `or []` so an explicit None value behaves like a
    # missing key, consistent with the `or`-defaulted string fields above.
    work_experience = resume_data.get("work_experience") or []
    education = resume_data.get("education") or []
    tags = resume_data.get("tags") or []

    # Build the meta description (truncated to 320 chars).
    experience_count = len(work_experience)
    skills_list = ', '.join(tags)
    short_about = about[:157] + '...' if len(about) > 160 else about

    seo_description = f"{full_name} - {position}. {salary}. Опыт работы: {experience_count} мест. Навыки: {skills_list}. {short_about}"
    seo_description = seo_description[:320]

    # Keywords meta tag; skills are capped to keep the tag a sane length.
    keywords = f"{full_name}, {position}, резюме, поиск сотрудников, навыки: {skills_list[:200]}"

    # schema.org "Person" structured data (JSON-LD).
    work_experience_json = []
    for exp in work_experience:
        period = exp.get("period", "")
        # Periods are stored as "start–end" (en dash separator);
        # missing parts become None in the JSON-LD output.
        period_parts = period.split('–') if period else []
        work_experience_json.append({
            "@type": "OrganizationRole",
            "roleName": exp.get("position", ""),
            "startDate": period_parts[0] if len(period_parts) > 0 else None,
            "endDate": period_parts[1] if len(period_parts) > 1 else None,
            "organization": {"@type": "Organization", "name": exp.get("company", "")}
        })

    education_json = []
    for edu in education:
        education_json.append({
            "@type": "EducationalOccupationalCredential",
            "credentialCategory": "Degree",
            "name": edu.get("specialty", ""),
            "educationalLevel": edu.get("institution", ""),
            "dateCreated": edu.get("graduation_year", "")
        })

    structured_data = {
        "@context": "https://schema.org",
        "@type": "Person",
        "name": full_name,
        "jobTitle": position,
        "description": resume_data.get("about_me", ""),
        "worksFor": work_experience_json,
        "alumniOf": education_json,
        "knowsAbout": tags,
        "url": f"https://yarmarka.rabota.today/resume/{resume_id}"
    }

    return {
        "title": f"{full_name} - {position} | Rabota.Today",
        "description": seo_description,
        "keywords": keywords,
        "og_title": f"{full_name} - {position}",
        "og_description": seo_description[:300],
        "og_url": f"https://yarmarka.rabota.today/resume/{resume_id}",
        "profile_first_name": first_name,
        "profile_last_name": last_name,
        "twitter_title": f"{full_name} - {position}",
        "twitter_description": seo_description[:300],
        "canonical_url": f"https://yarmarka.rabota.today/resume/{resume_id}",
        "structured_data": json.dumps(structured_data, ensure_ascii=False, indent=2)
    }
def generate_vacancy_seo_tags(vacancy_data: Dict[str, Any], vacancy_id: int) -> Dict[str, str]:
    """Generate SEO tags for a vacancy page.

    Args:
        vacancy_data: vacancy row from the database (dict-like), optionally
            carrying a "tags" list and company fields.
        vacancy_id: vacancy primary key, used to build canonical URLs.

    Returns:
        Dict with SEO tag values: title, description, keywords, Open Graph,
        Twitter and JSON-LD JobPosting structured data.
    """
    # Escape user-provided values so they are safe inside HTML attributes.
    title = html.escape(vacancy_data.get("title", "") or "")
    company = html.escape(vacancy_data.get("company_name", "Компания") or "Компания")
    salary = html.escape(vacancy_data.get("salary", "Зарплата не указана") or "Зарплата не указана")
    description = html.escape(
        vacancy_data.get("description", "Подробная информация о вакансии") or "Подробная информация о вакансии")

    # Guard with `or []` so an explicit None behaves like a missing key,
    # consistent with the `or`-defaulted string fields above.
    tags = vacancy_data.get("tags") or []
    tags_str = ', '.join(tags)
    short_description = description[:157] + '...' if len(description) > 160 else description

    # Meta description (truncated to 320 chars).
    seo_description = f"{title} в компании {company}. {salary}. {short_description}"
    seo_description = seo_description[:320]

    keywords = f"{title}, {company}, вакансия, работа, {tags_str}"

    # Extract the first integer from the salary text for schema.org.
    # NOTE(review): a thousands-separated value like "150 000" yields only
    # its first digit group (150) — confirm the stored salary format.
    # `salary` is never empty here thanks to the `or` default above.
    salary_value = 0
    salary_match = re.search(r'(\d+)', salary)
    if salary_match:
        salary_value = int(salary_match.group(1))

    # schema.org "JobPosting" structured data (JSON-LD).
    structured_data = {
        "@context": "https://schema.org",
        "@type": "JobPosting",
        "title": title,
        "description": description,
        "datePosted": vacancy_data.get("created_at"),
        "validThrough": vacancy_data.get("valid_through"),
        "employmentType": "FULL_TIME",
        "hiringOrganization": {
            "@type": "Organization",
            "name": company,
            "sameAs": vacancy_data.get("company_website", ""),
            "logo": vacancy_data.get("company_logo", "https://yarmarka.rabota.today/static/images/logo.png")
        },
        "jobLocation": {
            "@type": "Place",
            "address": {
                "@type": "PostalAddress",
                "addressLocality": vacancy_data.get("company_address", "Москва"),
                "addressCountry": "RU"
            }
        },
        "baseSalary": {
            "@type": "MonetaryAmount",
            "currency": "RUB",
            "value": {
                "@type": "QuantitativeValue",
                "value": salary_value,
                "unitText": "MONTH"
            }
        },
        "workHours": "Полный день"
    }

    return {
        "title": f"{title} в {company} | Rabota.Today",
        "description": seo_description,
        "keywords": keywords,
        "og_title": f"{title} в {company}",
        "og_description": seo_description[:300],
        "og_url": f"https://yarmarka.rabota.today/vacancy/{vacancy_id}",
        "twitter_title": f"{title} в {company}",
        "twitter_description": seo_description[:300],
        "canonical_url": f"https://yarmarka.rabota.today/vacancy/{vacancy_id}",
        "structured_data": json.dumps(structured_data, ensure_ascii=False, indent=2)
    }
def _replace_tag(page: str, prefix: str, value: str) -> str:
    """Replace the first tag opening with *prefix* up to the next '">'.

    Returns *page* unchanged when the prefix (or its closing '">') is
    not found. The replacement is prefix + value + '">'.
    """
    start = page.find(prefix)
    if start == -1:
        return page
    end = page.find('">', start)
    if end == -1:
        return page
    return page[:start] + prefix + value + '">' + page[end + 2:]


def inject_seo_tags(html_template: str, seo_tags: Dict[str, str]) -> str:
    """Inject SEO tags into an HTML template.

    Args:
        html_template: source HTML read from a template file.
        seo_tags: dict of SEO tag values (as produced by
            generate_resume_seo_tags / generate_vacancy_seo_tags).

    Returns:
        HTML with the placeholder SEO tags replaced.
    """
    # Step 1: swap placeholder titles and strip the id="" attributes from
    # the meta/link tag openings so the generic injection below matches.
    replacements = {
        '<title id="pageTitle">Резюме | Rabota.Today</title>': f'<title>{seo_tags.get("title", "Rabota.Today")}</title>',
        '<title id="pageTitle">Вакансия | Rabota.Today</title>': f'<title>{seo_tags.get("title", "Rabota.Today")}</title>',
        '<meta name="description" id="metaDescription"': '<meta name="description"',
        '<meta property="og:title" id="ogTitle"': '<meta property="og:title"',
        '<meta property="og:description" id="ogDescription"': '<meta property="og:description"',
        '<meta property="og:url" id="ogUrl"': '<meta property="og:url"',
        '<meta property="profile:first_name" id="profileFirstName"': '<meta property="profile:first_name"',
        '<meta property="profile:last_name" id="profileLastName"': '<meta property="profile:last_name"',
        '<meta name="twitter:title" id="twitterTitle"': '<meta name="twitter:title"',
        '<meta name="twitter:description" id="twitterDescription"': '<meta name="twitter:description"',
        '<link rel="canonical" id="canonicalUrl"': '<link rel="canonical"'
    }

    result = html_template
    for old, new in replacements.items():
        if old in result:
            result = result.replace(old, new)

    # Step 2: write the concrete values into each tag. Maps a seo_tags key
    # to the opening prefix of the tag whose content/href is replaced.
    tag_prefixes = {
        'description': '<meta name="description" content="',
        'keywords': '<meta name="keywords" content="',
        'og_title': '<meta property="og:title" content="',
        'og_description': '<meta property="og:description" content="',
        'og_url': '<meta property="og:url" content="',
        'profile_first_name': '<meta property="profile:first_name" content="',
        'profile_last_name': '<meta property="profile:last_name" content="',
        'twitter_title': '<meta name="twitter:title" content="',
        'twitter_description': '<meta name="twitter:description" content="',
        'canonical_url': '<link rel="canonical" href="',
    }
    for key, prefix in tag_prefixes.items():
        if key in seo_tags:
            result = _replace_tag(result, prefix, seo_tags[key])

    # Step 3: structured data. Inserts a fresh JSON-LD <script> and keeps
    # the original placeholder (hidden) so client-side JS can still find
    # the element by id.
    if 'structured_data' in seo_tags:
        structured_pattern = '<script type="application/ld+json" id="structuredData">'
        structured_start = result.find(structured_pattern)
        if structured_start != -1:
            structured_end = result.find('</script>', structured_start)
            if structured_end != -1:
                result = result[:structured_start] + (
                    f'<script type="application/ld+json">\n{seo_tags["structured_data"]}\n</script>\n'
                    '<script type="application/ld+json" id="structuredData" style="display:none;">'
                ) + result[structured_end + 9:]  # 9 == len('</script>')

    return result
140
server.py
140
server.py
@@ -17,6 +17,7 @@ import uvicorn
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import traceback
|
import traceback
|
||||||
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
||||||
|
from seo_helpers import generate_resume_seo_tags, generate_vacancy_seo_tags, inject_seo_tags
|
||||||
|
|
||||||
from fastapi.middleware.cors import CORSMiddleware
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
from fastapi.responses import JSONResponse
|
from fastapi.responses import JSONResponse
|
||||||
@@ -766,11 +767,62 @@ async def get_vacancies():
|
|||||||
|
|
||||||
|
|
||||||
@app.get("/vacancy/{vacancy_id}", response_class=HTMLResponse)
async def get_vacancy_page(request: Request, vacancy_id: int):
    """Vacancy detail page with server-side rendered SEO tags.

    Loads the vacancy (with company/user joins) and its tags, generates
    SEO tags and injects them into the templates/vacancy_detail.html
    template before returning it.
    """
    try:
        with get_db() as conn:
            cursor = conn.cursor()

            cursor.execute("""
                SELECT v.*,
                       COALESCE(c.name, u.full_name) as company_name,
                       c.description as company_description,
                       c.website as company_website,
                       c.address as company_address,
                       u.full_name as user_name,
                       u.email as user_email,
                       u.telegram as user_telegram
                FROM vacancies v
                JOIN users u ON v.user_id = u.id
                LEFT JOIN companies c ON v.user_id = c.user_id
                WHERE v.id = ?
            """, (vacancy_id,))

            vacancy = cursor.fetchone()
            if not vacancy:
                return HTMLResponse(content="<h1>Вакансия не найдена</h1>")

            # Fetch the vacancy's tags.
            cursor.execute("""
                SELECT t.name FROM tags t
                JOIN vacancy_tags vt ON t.id = vt.tag_id
                WHERE vt.vacancy_id = ?
            """, (vacancy_id,))
            tags = [t["name"] for t in cursor.fetchall()]

            vacancy_data = dict(vacancy)
            vacancy_data["tags"] = tags

            # Generate SEO tags from the vacancy data.
            seo_tags = generate_vacancy_seo_tags(vacancy_data, vacancy_id)

            # Read the HTML template.
            file_path = TEMPLATES_DIR / "vacancy_detail.html"
            if not file_path.exists():
                return HTMLResponse(content="<h1>Страница не найдена</h1>")

            with open(file_path, "r", encoding="utf-8") as f:
                page_html = f.read()

            # Inject the SEO tags into the HTML.
            page_html = inject_seo_tags(page_html, seo_tags)

            return HTMLResponse(content=page_html)

    except Exception as e:
        # Top-level handler boundary: log with traceback, return an error page.
        print(f"❌ Ошибка: {e}")
        traceback.print_exc()
        return HTMLResponse(content=f"<h1>Ошибка</h1><p>{str(e)}</p>")
|
||||||
@app.get("/resumes", response_class=HTMLResponse)
|
@app.get("/resumes", response_class=HTMLResponse)
|
||||||
@@ -782,11 +834,83 @@ async def get_resumes():
|
|||||||
|
|
||||||
|
|
||||||
@app.get("/resume/{resume_id}", response_class=HTMLResponse)
async def get_resume_page(request: Request, resume_id: int):
    """Resume detail page with server-side rendered SEO tags.

    Loads the resume (with user join), its tags, work experience and
    education, generates SEO tags and injects them into the
    templates/resume_detail.html template before returning it.
    """
    try:
        with get_db() as conn:
            cursor = conn.cursor()

            # Fetch the resume together with the owner's contact data.
            cursor.execute("""
                SELECT r.*, u.full_name, u.email, u.phone, u.telegram
                FROM resumes r
                JOIN users u ON r.user_id = u.id
                WHERE r.id = ?
            """, (resume_id,))

            resume = cursor.fetchone()
            if not resume:
                # Serve the template with a "not found" title when available,
                # so the page keeps its layout; otherwise a bare message.
                file_path = TEMPLATES_DIR / "resume_detail.html"
                if file_path.exists():
                    with open(file_path, "r", encoding="utf-8") as f:
                        page_html = f.read()
                    page_html = page_html.replace('<title id="pageTitle">Резюме | Rabota.Today</title>',
                                                  '<title>Резюме не найдено | Rabota.Today</title>')
                    return HTMLResponse(content=page_html)
                return HTMLResponse(content="<h1>Резюме не найдено</h1>")

            # Fetch the resume's tags.
            cursor.execute("""
                SELECT t.name FROM tags t
                JOIN resume_tags rt ON t.id = rt.tag_id
                WHERE rt.resume_id = ?
            """, (resume_id,))
            tags = [t["name"] for t in cursor.fetchall()]

            # Fetch work experience.
            cursor.execute("""
                SELECT position, company, period, description
                FROM work_experience
                WHERE resume_id = ?
                ORDER BY period DESC
            """, (resume_id,))
            work_experience = cursor.fetchall()

            # Fetch education.
            cursor.execute("""
                SELECT institution, specialty, graduation_year
                FROM education
                WHERE resume_id = ?
                ORDER BY graduation_year DESC
            """, (resume_id,))
            education = cursor.fetchall()

            # Assemble the data structure expected by the SEO helper.
            resume_data = dict(resume)
            resume_data["tags"] = tags
            resume_data["work_experience"] = [dict(exp) for exp in work_experience]
            resume_data["education"] = [dict(edu) for edu in education]

            # Generate SEO tags from the resume data.
            seo_tags = generate_resume_seo_tags(resume_data, resume_id)

            # Read the HTML template.
            file_path = TEMPLATES_DIR / "resume_detail.html"
            if not file_path.exists():
                return HTMLResponse(content="<h1>Страница не найдена</h1>")

            with open(file_path, "r", encoding="utf-8") as f:
                page_html = f.read()

            # Inject the SEO tags into the HTML.
            page_html = inject_seo_tags(page_html, seo_tags)

            return HTMLResponse(content=page_html)

    except Exception as e:
        # Top-level handler boundary: log with traceback, return an error page.
        print(f"❌ Ошибка при загрузке страницы резюме: {e}")
        traceback.print_exc()
        return HTMLResponse(content=f"<h1>Ошибка</h1><p>{str(e)}</p>")
||||||
|
|
||||||
@app.get("/favorites", response_class=HTMLResponse)
|
@app.get("/favorites", response_class=HTMLResponse)
|
||||||
|
|||||||
Reference in New Issue
Block a user