Resume Renamer 260120

From Game in the Brain Wiki

Latest revision as of 09:14, 4 February 2026

1. The Problem

Students and applicants rarely follow file naming conventions. You likely have a folder that looks like this:

Resume.pdf

CV_Final_v2.docx

MyResume(1).pdf

john_doe.pdf

This makes sorting by date or qualification impossible without opening every single file.

The Goal: Automatically rename these files based on their content to a standard format:

YYMMDD Name Degree/Background.pdf
Example: 250101 Juan Dela Cruz BS Information Technology.pdf

2. Requirements Checklist

Please ensure you have the following ready before starting.

[ ] Ubuntu 24.04 System.

[ ] Python 3.12+ (Pre-installed on Ubuntu 24.04).

[ ] Ollama installed locally (The AI engine).

[ ] A Small Language Model pulled (e.g., granite3.3:2b or llama3.2).

  • Note: Small models are fast but can make mistakes. The script has logic to catch these, but a human review is always recommended.

[ ] Python Libraries: pdfplumber (for PDFs), python-docx (for Word), requests (to talk to Ollama).

[ ] No Images: The files must have embedded text. The renamer itself does not perform OCR (Optical Character Recognition), to keep it fast and lightweight. Pure image scans will be skipped; run the OCR Converter Script from the appendix on those first.

3. How the Script Works (The Logic)

This script acts as a "Project Manager" that hires two distinct specialists to process each file. It does not blindly ask the AI for everything, as small AIs make mistakes with math and dates.

File Discovery:

    • The script looks for .pdf and .docx files in the folder where the script is located.

Text Extraction:

    • It pulls raw text. If the text is less than 50 characters (likely an image scan), it skips the file.
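
In sketch form, the discovery and length check look roughly like this (folder resolution follows the same pattern as the appendix scripts):

import os

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
candidates = [f for f in os.listdir(SCRIPT_DIR)
              if os.path.splitext(f)[1].lower() in ('.pdf', '.docx')]
# Any file whose extracted text is shorter than ~50 characters is treated
# as an image-only scan and skipped.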

The Date Specialist (Python Regex):

    • Logic: It scans the text for explicit years (e.g., "2023", "2024").
    • Rule: It ignores the word "Present". Why? If a resume from 2022 says "2022 - Present", treating "Present" as "Today" (2026) would incorrectly date the old resume. We stick to the highest printed number.
    • Output: Sets the date to Jan 1st of the highest year found (e.g., 240101).
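
A minimal sketch of this year heuristic (the exact regex in the script may differ slightly):

import re

def latest_year_to_yymmdd(text):
    """Find explicit 4-digit years (2000-2059) and return Jan 1st of the highest one as YYMMDD."""
    years = [int(y) for y in re.findall(r'(?<!\d)(20[0-5][0-9])(?!\d)', text)]
    if not years:
        return None  # no explicit year found
    return f"{str(max(years))[2:]}0101"  # e.g. 2024 -> "240101"

print(latest_year_to_yymmdd("Intern, 2022 - Present. Graduated 2024."))  # 240101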

The Content Specialist (Ollama AI):

    • Logic: It sends the text to the local AI with strict instructions.
    • Rule 1 (Priority): It looks for a Degree (e.g., "BS IT") first. It is forbidden from using "Intern" or "Student" if a degree is found.
    • Rule 2 (Fallback): If the AI fails to find a name, the script grabs the first line of the document as a fallback.
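
A rough sketch of that call against the local Ollama API (the prompt wording here is illustrative; the script's actual prompt is longer and stricter):

import requests

def ask_ollama(resume_text, model="granite3.3:2b"):
    """Ask the local Ollama server for 'Name | Background', preferring the degree over job titles."""
    prompt = (
        "Extract the applicant's Full Name and Background from the resume.\n"
        "Prefer the educational degree over job titles such as 'Intern' or 'Student'.\n"
        "Answer strictly as: Name | Background\n\n"
        f"Resume Text:\n{resume_text}"
    )
    data = {"model": model, "prompt": prompt, "stream": False,
            "options": {"temperature": 0.1, "num_ctx": 4096}}
    response = requests.post("http://localhost:11434/api/generate", json=data, timeout=60)
    response.raise_for_status()
    return response.json()["response"].strip()  # e.g. "Juan Dela Cruz | BS Information Technology"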

Sanitization & Renaming:

    • It fixes "Spaced Names" (e.g., J O H N -> John).
    • It ensures the filename isn't too long.
    • It renames the file only if the name doesn't already exist.
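
A short sketch of that clean-up (the script's own helpers may differ in detail):

import re

def fix_spaced_names(text):
    """Collapse spaced-out single letters: 'J O H N' -> 'JOHN'."""
    return re.sub(r'(?<=\b[A-Za-z])\s+(?=[A-Za-z]\b)', '', text)

def make_safe_filename_part(s, limit=60):
    """Drop characters that are risky in filenames and cap the length."""
    return re.sub(r'[^\w\s-]', '', s)[:limit].strip()

print(fix_spaced_names("J O H N Reyes"))                   # JOHN Reyes
print(make_safe_filename_part("Juan Dela Cruz (BS IT)"))   # Juan Dela Cruz BS IT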

4. Installation Guide (Ubuntu 24.04)

Open your terminal (Ctrl+Alt+T) and follow these steps exactly.

Step A: System Update

Ensure your system tools are fresh to avoid installation conflicts.

sudo apt update && sudo apt upgrade -y

Step B: Install Ollama & The Model

Install the Ollama Engine:

  1. curl -fsSL https://ollama.com/install.sh | sh
    

Download the Brain (The Model). We use granite3.3:2b because it is very fast:

  1. ollama pull granite3.3:2b
    

Step C: Setup Python Environment

Ubuntu 24.04 requires Virtual Environments (venv) for Python scripts.

Create a Project Folder:

  1. mkdir ~/resume_renamer
    cd ~/resume_renamer
    

Create the Virtual Environment:

  1. python3 -m venv venv
    

Activate the Environment:

  1. source venv/bin/activate
    
    (You should see (venv) at the start of your command line now).

Install Required Libraries:

  1. pip install requests pdfplumber python-docx
    

Step D: Create the Script

Create the python file:

  1. nano rename_resumes.py
    

Paste the Python code provided in the appendix below.

Save and exit: Press Ctrl+O, Enter, then Ctrl+X.

5. Running the Renamer

This script is portable. It works on the files sitting next to it.

Copy the Script: Move the rename_resumes.py file into your folder full of PDFs (e.g., ~/Documents/Student_CVs).

Open Terminal in that folder:

  1. cd ~/Documents/Student_CVs
    

Activate your Python Environment (Point to where you created it):

  1. source ~/resume_renamer/venv/bin/activate
    

Run the script:

  1. python3 rename_resumes.py
    

6. Common Errors & Troubleshooting

Error / Behavior, why it happens, and the fix (included in the script):

    • "Intern" instead of "Degree". Why: the resume had "INTERN" in big bold letters. Fix: the script's prompt explicitly forbids "Intern" if a degree is found.
    • Wrong Date (e.g., 260101). Why: the resume said "2021-Present" and the script assumed "Present" = 2026. Fix: the "Present" logic is disabled; the script only trusts explicit numbers (e.g., 2021).
    • Spaced Names (J O H N). Why: PDF formatting added spaces between letters. Fix: a regex function detects single letters separated by spaces and collapses them.
    • Script Freezes. Why: Ollama is overwhelmed. Fix: a 60-second timeout and a 0.5s pause between files.
    • Skipped Files. Why: the PDF is a scanned image with no embedded text. Fix: this is intended; run the OCR Converter Script from the appendix on those files first.

Appendix: The Python Scripts

Rename Resumes Script

Copy the code below into rename_resumes.py.

import pdfplumber

# --- IMPROVED FUNCTION: SMART PDF READER (Skips Forms & Signature Pages) ---
def get_smart_pdf_text(filepath):
    """
    Reads PDF pages but SKIPS pages that look like 'Application Forms'.
    Returns the text of the first 2 'valid' resume pages found.
    """
    valid_text = ""
    pages_read = 0
    
    # Phrases that indicate a page is a FORM, not a Resume
    skip_phrases = [
        "APPLICATION FOR EMPLOYMENT", 
        "OFFICIAL USE ONLY", 
        "DO NOT WRITE BELOW THIS LINE",
        "PERSONAL DATA SHEET",
        "APPLICANT'S SIGNATURE",   # Found on Page 2 of your file
        "FAMILY BACKGROUND"        # Found on Page 2 of your file
    ]

    try:
        with pdfplumber.open(filepath) as pdf:
            for page in pdf.pages:
                text = page.extract_text() or ""
                
                # CHECK: Is this page just a form?
                # We check if ANY of the skip phrases appear in the text
                is_form = any(phrase in text.upper() for phrase in skip_phrases)
                
                if is_form:
                    print(f"    [INFO] Skipped a 'Form' page (found key phrase)...")
                    continue  # Skip this page, check the next one
                
                # If not a form, it's likely the resume. Keep it.
                valid_text += text + "\n"
                pages_read += 1
                
                # Stop after finding 2 valid pages of resume content
                if pages_read >= 2:
                    break
                    
    except Exception as e:
        print(f"    [ERROR] PDF Read Error: {e}")
        return ""
        
    return valid_text
# --------------------------------------
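
For a quick standalone test of this reader, you can append something like the following (assumes pdfplumber is installed; the filename resume.pdf is only an example):

if __name__ == "__main__":
    preview = get_smart_pdf_text("resume.pdf")   # hypothetical sample file
    print(preview[:500] or "[no usable resume text found]")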

OCR Converter Script

Copy the code below into ocr_converter.py. The renamer does not work with image-only PDFs, so convert those first. The results are only as good as the OCR engine used.

python3 ocr_converter.py
import os
import subprocess
import pdfplumber

# Configuration
FOLDER_PATH = "."  # Current folder
MIN_TEXT_LENGTH = 50  # If text is less than this, we assume it's an image

def has_embedded_text(file_path):
    """Checks if a PDF already has text."""
    try:
        with pdfplumber.open(file_path) as pdf:
            full_text = ""
            for page in pdf.pages:
                text = page.extract_text()
                if text:
                    full_text += text
            
            # If we found enough text, return True
            if len(full_text.strip()) > MIN_TEXT_LENGTH:
                return True
    except Exception as e:
        print(f"Error reading {file_path}: {e}")
        return False
    return False

def ocr_file(file_path):
    """Runs OCRmyPDF on the file."""
    output_path = file_path.replace(".pdf", "_OCR.pdf")
    
    # Don't re-OCR if the output already exists
    if os.path.exists(output_path):
        print(f"Skipping {file_path} (OCR version already exists)")
        return

    print(f"🖼️  Image Detected: Converting {file_path}...")
    
    try:
        # Run the OCR command
        # --force-ocr: Process even if it thinks there is some text (often garbage in scans)
        # --deskew: Straighten crooked scans
        command = [
            "ocrmypdf", 
            "--force-ocr", 
            "--deskew", 
            file_path, 
            output_path
        ]
        
        result = subprocess.run(command, capture_output=True, text=True)
        
        if result.returncode == 0:
            print(f"✅ Success: Created {output_path}")
        else:
            print(f"❌ Failed to OCR {file_path}")
            print(result.stderr)
            
    except FileNotFoundError:
        print("❌ Error: 'ocrmypdf' is not installed. Run 'sudo apt install ocrmypdf' first.")

def main():
    print("🔍 Scanning for image-based PDFs...")
    files = [f for f in os.listdir(FOLDER_PATH) if f.lower().endswith(".pdf") and "_OCR" not in f]
    
    count = 0
    for filename in files:
        file_path = os.path.join(FOLDER_PATH, filename)
        
        if not has_embedded_text(file_path):
            ocr_file(file_path)
            count += 1
            
    if count == 0:
        print("🎉 No image-only PDFs found. All files already have text!")
    else:
        print(f"\n✨ Processed {count} files.")

if __name__ == "__main__":
    main()

PDF 2 VCF Script

Copy the code below into pdf2vcf.py. This creates a bulk VCF file that you can import into your contacts app.

python3 pdf2vcf.py
import os
import requests
import json
import pdfplumber
import re
from datetime import datetime
import time

# --- CONFIGURATION ---
FOLDER_PATH = os.path.dirname(os.path.abspath(__file__))
OLLAMA_MODEL = "granite3.3:2b" 
# ---------------------

def get_timestamp():
    """Returns current YYMMDD-HHMMSS"""
    return datetime.now().strftime('%y%m%d-%H%M%S')

def get_short_date():
    """Returns current YYMMDD"""
    return datetime.now().strftime('%y%m%d')

# --- SMART PDF READER ---
def get_smart_pdf_text(filepath):
    """
    Reads PDF pages but SKIPS pages that look like 'Application Forms'.
    Returns the text of the first 2 'valid' resume pages found.
    """
    valid_text = ""
    pages_read = 0
    skip_phrases = [
        "APPLICATION FOR EMPLOYMENT", "OFFICIAL USE ONLY", 
        "DO NOT WRITE BELOW THIS LINE", "PERSONAL DATA SHEET",
        "APPLICANT'S SIGNATURE", "FAMILY BACKGROUND"
    ]

    try:
        with pdfplumber.open(filepath) as pdf:
            for page in pdf.pages:
                text = page.extract_text() or ""
                # CHECK: Is this page just a form?
                if any(phrase in text.upper() for phrase in skip_phrases):
                    continue 
                
                valid_text += text + "\n"
                pages_read += 1
                if pages_read >= 2: break     
    except Exception as e:
        print(f"    [ERROR] PDF Read Error: {e}")
        return ""
    return valid_text

def clean_text_for_llm(text):
    clean = " ".join(text.split())
    return clean[:6000]

def parse_name_from_filename(filename):
    """
    Fallback: Tries to guess the name from a filename like '260101 Kim Ong Diploma.pdf'
    """
    # Remove extension
    base = os.path.splitext(filename)[0]
    
    # Regex: Look for 6 digits at start, then text
    match = re.search(r'^\d{6}\s+(.*?)\s+(?:Bachelor|Diploma|Certificate|General|Master|PhD|Associate|Engineer|Architect)', base, re.IGNORECASE)
    if match:
        return match.group(1).strip()
    
    # Weaker Regex: Just take the first 3 words after the date
    match_weak = re.search(r'^\d{6}\s+([A-Za-z-]+\s+[A-Za-z-]+\s?[A-Za-z-]*)', base)
    if match_weak:
        return match_weak.group(1).strip()

    return None

def ask_ollama_extraction(text, filename):
    """
    Asks LLM to extract specific fields, using the FILENAME as a hint.
    """
    system_instruction = (
        "You are a Data Extraction Expert. Extract details from the resume.\n"
        f"CONTEXT: The file is named '{filename}'. This filename likely contains the correct spelling of the Name and Degree.\n"
        "\nRULES:\n"
        "1. **Double Check the Name:** If the resume text has OCR errors (e.g., 'K1m 0ng'), use the spelling from the Filename ('Kim Ong').\n"
        "2. **Extract:** Full Name, Educational Degree (Short), Email, Phone, and Summary.\n"
        "3. **Summary:** Write a concise 3-sentence summary of their key skills.\n"
        "\nRETURN JSON ONLY:\n"
        "{\n"
        '  "name": "John Doe",\n'
        '  "degree": "BS IT",\n'
        '  "email": "john@email.com",\n'
        '  "phone": "09123456789",\n'
        '  "summary": "Experienced in..."\n'
        "}"
    )

    prompt = f"Resume Text:\n{text}\n\n{system_instruction}"

    url = "http://localhost:11434/api/generate"
    data = {
        "model": OLLAMA_MODEL,
        "prompt": prompt,
        "stream": False,
        "format": "json", 
        "options": {"temperature": 0.1, "num_ctx": 4096}
    }

    try:
        response = requests.post(url, json=data, timeout=60)
        response.raise_for_status()
        result_json = response.json()['response']
        return json.loads(result_json)
    except Exception as e:
        print(f"    [Warning] AI Extraction failed: {e}")
        return None

def create_vcard_string(data, creation_date):
    """
    Formats the data into VCF 3.0 format.
    Format: Name Degree YYMMDD (All in First Name field for easy searching)
    """
    name = data.get("name", "Unknown")
    degree = data.get("degree", "")
    email = data.get("email", "")
    phone = data.get("phone", "")
    summary = data.get("summary", "")

    # Sanitize inputs
    if not name or name == "Unknown":
        name = "Unknown Candidate"
    
    complex_name = f"{name} {degree} {creation_date}".strip()
    
    vcf = [
        "BEGIN:VCARD",
        "VERSION:3.0",
        f"N:;{complex_name};;;", 
        f"FN:{complex_name}",
        f"TEL;TYPE=CELL:{phone}",
        f"EMAIL;TYPE=WORK:{email}",
        f"NOTE:{summary} (Extracted via AI)",
        f"REV:{datetime.now().isoformat()}",
        "END:VCARD"
    ]
    return "\n".join(vcf) + "\n"

def process_to_vcf():
    output_filename = f"{get_timestamp()}_Bulk_Import.vcf"
    output_path = os.path.join(FOLDER_PATH, output_filename)
    creation_date = get_short_date() 

    print(f"--- Smart Resume to VCF Exporter ---")
    print(f"Target Output: {output_filename}")
    
    count = 0
    
    with open(output_path, "w", encoding="utf-8") as vcf_file:
        
        for filename in os.listdir(FOLDER_PATH):
            if not filename.lower().endswith(".pdf"):
                continue

            filepath = os.path.join(FOLDER_PATH, filename)
            print(f"Processing: {filename}...")

            # 1. Get Text
            text = get_smart_pdf_text(filepath)
            if len(text) < 50:
                print("    [SKIP] Text too short/unreadable.")
                continue

            # 2. Extract Data (Passing filename for context)
            time.sleep(0.5) 
            data = ask_ollama_extraction(clean_text_for_llm(text), filename)

            if data:
                # 3. Double Check Name (Python Logic Fallback)
                # If AI gave a bad name, or "Unknown", try to grab it from the filename manually
                ai_name = data.get("name", "")
                if not ai_name or "unknown" in ai_name.lower() or any(char.isdigit() for char in ai_name):
                    fallback_name = parse_name_from_filename(filename)
                    if fallback_name:
                        print(f"    [Correction] Replaced '{ai_name}' with filename name: '{fallback_name}'")
                        data['name'] = fallback_name

                # 4. Create VCard Block
                vcard_block = create_vcard_string(data, creation_date)
                vcf_file.write(vcard_block)
                print(f"    -> Added: {data.get('name')} ({data.get('degree')})")
                count += 1
            else:
                print("    -> Failed to extract data.")

    print(f"\nDone! Created {output_filename} with {count} contacts.")

if __name__ == "__main__":
    process_to_vcf()
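
For reference, a single entry in the generated .vcf looks roughly like this (all values illustrative):

BEGIN:VCARD
VERSION:3.0
N:;Juan Dela Cruz BS IT 260204;;;
FN:Juan Dela Cruz BS IT 260204
TEL;TYPE=CELL:09123456789
EMAIL;TYPE=WORK:juan@email.com
NOTE:Experienced in web development and networking. (Extracted via AI)
REV:2026-02-04T09:14:00
END:VCARD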